-rw-r--r--contrib/artwork/oe.svg80
-rw-r--r--meta-selftest/conf/layer.conf2
-rw-r--r--meta-selftest/lib/oeqa/runtime/cases/virgl.py5
-rw-r--r--meta-selftest/recipes-test/aspell/aspell_0.60.8.bbappend (renamed from meta-selftest/recipes-test/aspell/aspell_0.60.7.bbappend)0
-rw-r--r--meta-selftest/recipes-test/multiconfig/multiconfig-test-parse.bb11
-rw-r--r--meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb2
-rw-r--r--meta-skeleton/conf/layer.conf2
-rw-r--r--meta/classes/archiver.bbclass5
-rw-r--r--meta/classes/base.bbclass9
-rw-r--r--meta/classes/cmake.bbclass3
-rw-r--r--meta/classes/core-image.bbclass1
-rw-r--r--meta/classes/cve-check.bbclass13
-rw-r--r--meta/classes/devtool-source.bbclass8
-rw-r--r--meta/classes/externalsrc.bbclass2
-rw-r--r--meta/classes/grub-efi-cfg.bbclass1
-rw-r--r--meta/classes/grub-efi.bbclass42
-rw-r--r--meta/classes/icecc.bbclass39
-rw-r--r--meta/classes/image-live.bbclass2
-rw-r--r--meta/classes/image.bbclass2
-rw-r--r--meta/classes/insane.bbclass19
-rw-r--r--meta/classes/kernel-devicetree.bbclass20
-rw-r--r--meta/classes/kernel-fitimage.bbclass45
-rw-r--r--meta/classes/libc-package.bbclass15
-rw-r--r--meta/classes/license.bbclass25
-rw-r--r--meta/classes/license_image.bbclass10
-rw-r--r--meta/classes/live-vm-common.bbclass33
-rw-r--r--meta/classes/meson.bbclass2
-rw-r--r--meta/classes/multilib.bbclass2
-rw-r--r--meta/classes/nativesdk.bbclass2
-rw-r--r--meta/classes/package.bbclass97
-rw-r--r--meta/classes/package_rpm.bbclass5
-rw-r--r--meta/classes/packagegroup.bbclass2
-rw-r--r--meta/classes/populate_sdk_base.bbclass3
-rw-r--r--meta/classes/populate_sdk_ext.bbclass38
-rw-r--r--meta/classes/report-error.bbclass23
-rw-r--r--meta/classes/reproducible_build.bbclass13
-rw-r--r--meta/classes/rm_work.bbclass9
-rw-r--r--meta/classes/rootfs-postcommands.bbclass8
-rw-r--r--meta/classes/sanity.bbclass17
-rw-r--r--meta/classes/sstate.bbclass28
-rw-r--r--meta/classes/staging.bbclass16
-rw-r--r--meta/classes/systemd-boot.bbclass31
-rw-r--r--meta/classes/systemd.bbclass7
-rw-r--r--meta/classes/testimage.bbclass18
-rw-r--r--meta/classes/tinderclient.bbclass368
-rw-r--r--meta/classes/toolchain-scripts.bbclass1
-rw-r--r--meta/classes/uninative.bbclass2
-rw-r--r--meta/conf/bitbake.conf2
-rw-r--r--meta/conf/distro/include/maintainers.inc3
-rw-r--r--meta/conf/distro/include/ptest-packagelists.inc2
-rw-r--r--meta/conf/distro/include/tcmode-default.inc2
-rw-r--r--meta/conf/distro/include/yocto-uninative.inc10
-rw-r--r--meta/conf/image-uefi.conf16
-rw-r--r--meta/conf/layer.conf6
-rw-r--r--meta/conf/local.conf.sample14
-rw-r--r--meta/conf/machine/include/riscv/qemuriscv.inc13
-rw-r--r--meta/conf/machine/include/tune-cortexa57-cortexa53.inc18
-rw-r--r--meta/conf/machine/qemuriscv64.conf2
-rw-r--r--meta/conf/multilib.conf2
-rw-r--r--meta/conf/sanity.conf2
-rw-r--r--meta/lib/oe/buildhistory_analysis.py2
-rw-r--r--meta/lib/oe/classextend.py2
-rw-r--r--meta/lib/oe/copy_buildsystem.py8
-rw-r--r--meta/lib/oe/lsb.py8
-rw-r--r--meta/lib/oe/package_manager.py10
-rw-r--r--meta/lib/oe/packagedata.py5
-rw-r--r--meta/lib/oe/sdk.py4
-rw-r--r--meta/lib/oe/sstatesig.py33
-rw-r--r--meta/lib/oe/terminal.py2
-rw-r--r--meta/lib/oe/types.py4
-rw-r--r--meta/lib/oeqa/core/case.py49
-rw-r--r--meta/lib/oeqa/core/decorator/data.py18
-rw-r--r--meta/lib/oeqa/core/utils/concurrencytest.py33
-rw-r--r--meta/lib/oeqa/runtime/cases/df.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/dnf.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/opkg.py4
-rw-r--r--meta/lib/oeqa/runtime/cases/systemd.py16
-rw-r--r--meta/lib/oeqa/runtime/context.py2
-rw-r--r--meta/lib/oeqa/sdk/cases/buildepoxy.py4
-rw-r--r--meta/lib/oeqa/selftest/case.py17
-rw-r--r--meta/lib/oeqa/selftest/cases/bblayers.py8
-rw-r--r--meta/lib/oeqa/selftest/cases/bbtests.py20
-rw-r--r--meta/lib/oeqa/selftest/cases/binutils.py11
-rw-r--r--meta/lib/oeqa/selftest/cases/devtool.py15
-rw-r--r--meta/lib/oeqa/selftest/cases/gcc.py9
-rw-r--r--meta/lib/oeqa/selftest/cases/glibc.py7
-rw-r--r--meta/lib/oeqa/selftest/cases/imagefeatures.py9
-rw-r--r--meta/lib/oeqa/selftest/cases/incompatible_lic.py52
-rw-r--r--meta/lib/oeqa/selftest/cases/multiconfig.py45
-rw-r--r--meta/lib/oeqa/selftest/cases/oescripts.py64
-rw-r--r--meta/lib/oeqa/selftest/cases/recipetool.py4
-rw-r--r--meta/lib/oeqa/selftest/cases/reproducible.py22
-rw-r--r--meta/lib/oeqa/selftest/cases/runtime_test.py4
-rw-r--r--meta/lib/oeqa/selftest/cases/signing.py24
-rw-r--r--meta/lib/oeqa/selftest/cases/wic.py6
-rw-r--r--meta/recipes-bsp/acpid/acpid.inc6
-rw-r--r--meta/recipes-bsp/formfactor/formfactor_0.0.bb3
-rw-r--r--meta/recipes-bsp/gnu-efi/gnu-efi/gnu-efi-3.0.9-fix-clang-build.patch24
-rw-r--r--meta/recipes-bsp/gnu-efi/gnu-efi/parallel-make-archives.patch18
-rw-r--r--meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb (renamed from meta/recipes-bsp/gnu-efi/gnu-efi_3.0.9.bb)5
-rw-r--r--meta/recipes-bsp/grub/grub-bootconf_1.00.bb13
-rw-r--r--meta/recipes-bsp/grub/grub-efi_2.04.bb19
-rw-r--r--meta/recipes-bsp/grub/grub_2.04.bb2
-rw-r--r--meta/recipes-bsp/opensbi/opensbi_0.5.bb (renamed from meta/recipes-bsp/opensbi/opensbi_0.4.bb)2
-rw-r--r--meta/recipes-bsp/u-boot/files/0001-include-env.h-Ensure-ulong-is-defined.patch31
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-common.inc9
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.10.bb (renamed from meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.07.bb)2
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-tools_2019.10.bb (renamed from meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb)0
-rw-r--r--meta/recipes-bsp/u-boot/u-boot_2019.10.bb (renamed from meta/recipes-bsp/u-boot/u-boot_2019.07.bb)0
-rw-r--r--meta/recipes-bsp/usbinit/usbinit.bb4
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch64
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch60
-rw-r--r--meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch670
-rw-r--r--meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch278
-rw-r--r--meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch512
-rw-r--r--meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch911
-rw-r--r--meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch80
-rw-r--r--meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch140
-rw-r--r--meta/recipes-connectivity/bind/bind_9.11.5-P4.bb10
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5.inc3
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/0001-Makefile.am-Fix-a-race-issue-for-tools.patch28
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/0001-tools-btpclient.c-include-signal.h.patch30
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2018-10910.patch726
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/out-of-tree.patch2
-rw-r--r--meta/recipes-connectivity/connman/connman.inc2
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp.inc2
-rw-r--r--meta/recipes-connectivity/iproute2/iproute2.inc5
-rw-r--r--meta/recipes-connectivity/iw/iw_5.3.bb (renamed from meta/recipes-connectivity/iw/iw_5.0.1.bb)5
-rw-r--r--meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch29
-rw-r--r--meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb (renamed from meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb)7
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Fix-include-order-between-config.h-and-stat.h.patch156
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils/nfsserver2
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb3
-rw-r--r--meta/recipes-connectivity/ofono/ofono_1.30.bb9
-rw-r--r--meta/recipes-connectivity/ofono/ofono_1.31.bb (renamed from meta/recipes-connectivity/ofono/ofono.inc)37
-rw-r--r--meta/recipes-connectivity/openssh/openssh/0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch40
-rw-r--r--meta/recipes-connectivity/openssh/openssh_8.0p1.bb1
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Fix-broken-change-from-b3d113e.patch35
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Fix-build-error-for-aarch64-big-endian.patch43
-rw-r--r--meta/recipes-connectivity/openssl/openssl_1.1.1d.bb (renamed from meta/recipes-connectivity/openssl/openssl_1.1.1c.bb)14
-rw-r--r--meta/recipes-connectivity/socat/socat_1.7.3.3.bb7
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch82
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb3
-rw-r--r--meta/recipes-core/base-files/base-files_3.0.14.bb2
-rw-r--r--meta/recipes-core/busybox/busybox-inittab_1.31.0.bb19
-rw-r--r--meta/recipes-core/busybox/busybox.inc9
-rw-r--r--meta/recipes-core/busybox/busybox_1.31.0.bb3
-rwxr-xr-xmeta/recipes-core/busybox/files/mount.busybox3
-rwxr-xr-xmeta/recipes-core/busybox/files/umount.busybox3
-rw-r--r--meta/recipes-core/coreutils/coreutils_8.31.bb6
-rw-r--r--meta/recipes-core/dbus/dbus/dbus-1.init4
-rw-r--r--meta/recipes-core/dbus/dbus_1.12.16.bb2
-rw-r--r--meta/recipes-core/ell/ell_0.26.bb (renamed from meta/recipes-core/ell/ell_0.22.bb)4
-rw-r--r--meta/recipes-core/expat/expat_2.2.9.bb (renamed from meta/recipes-core/expat/expat_2.2.7.bb)4
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-Do-not-write-bindir-into-pkg-config-files.patch31
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-Fix-DATADIRNAME-on-uclibc-Linux.patch34
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-Set-host_machine-correctly-when-building-with-mingw3.patch22
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-meson-Run-atomics-test-on-clang-as-well.patch31
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch62
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch27
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch8
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/uclibc_musl_translation.patch22
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0_2.62.1.bb (renamed from meta/recipes-core/glib-2.0/glib-2.0_2.60.6.bb)9
-rw-r--r--meta/recipes-core/glib-2.0/glib.inc2
-rw-r--r--meta/recipes-core/glib-networking/glib-networking_2.62.1.bb (renamed from meta/recipes-core/glib-networking/glib-networking_2.60.3.bb)6
-rw-r--r--meta/recipes-core/glibc/glibc-package.inc37
-rw-r--r--meta/recipes-core/glibc/glibc-testsuite_2.30.bb7
-rw-r--r--meta/recipes-core/images/build-appliance-image_15.0.0.bb2
-rw-r--r--meta/recipes-core/initrdscripts/initramfs-framework/rootfs14
-rw-r--r--meta/recipes-core/initscripts/initscripts-1.0/alignment.sh (renamed from meta/recipes-core/initscripts/initscripts-1.0/arm/alignment.sh)0
-rw-r--r--meta/recipes-core/initscripts/initscripts_1.0.bb1
-rw-r--r--meta/recipes-core/musl/musl/0001-riscv-Define-sigcontext-again.patch48
-rw-r--r--meta/recipes-core/musl/musl_git.bb3
-rw-r--r--meta/recipes-core/ncurses/files/0001-ncurses-selective-backport-of-20191012-patch.patch169
-rw-r--r--meta/recipes-core/ncurses/ncurses_6.1+20190803.bb1
-rw-r--r--meta/recipes-core/ovmf/ovmf_git.bb2
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb2
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-core-tools-debug.bb2
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-self-hosted.bb2
-rwxr-xr-xmeta/recipes-core/psplash/files/psplash-init2
-rw-r--r--meta/recipes-core/readline/readline-8.0/rl-native.map12
-rw-r--r--meta/recipes-core/readline/readline.inc5
-rw-r--r--meta/recipes-core/systemd/systemd-boot_243.bb (renamed from meta/recipes-core/systemd/systemd-boot_242.bb)15
-rw-r--r--meta/recipes-core/systemd/systemd-conf_243.bb (renamed from meta/recipes-core/systemd/systemd-conf_242.bb)0
-rwxr-xr-xmeta/recipes-core/systemd/systemd-systemctl/systemctl2
-rw-r--r--meta/recipes-core/systemd/systemd.inc4
-rw-r--r--meta/recipes-core/systemd/systemd/0001-Replace-the-legacy-ULONG_LONG_MAX-with-the-C99-ULLON.patch50
-rw-r--r--meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch17
-rw-r--r--meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch148
-rw-r--r--meta/recipes-core/systemd/systemd/0001-src-udev-udev-event.c-must-include-sys-wait.h.patch35
-rw-r--r--meta/recipes-core/systemd/systemd/0001-unit-file.c-consider-symlink-on-filesystems-like-NFS.patch42
-rw-r--r--meta/recipes-core/systemd/systemd/0002-src-login-brightness.c-include-sys-wait.h.patch25
-rw-r--r--meta/recipes-core/systemd/systemd/0002-use-lnr-wrapper-instead-of-looking-for-relative-opti.patch23
-rw-r--r--meta/recipes-core/systemd/systemd/0003-src-basic-copy.c-include-signal.h.patch27
-rw-r--r--meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch15
-rw-r--r--meta/recipes-core/systemd/systemd/0004-rules-whitelist-hd-devices.patch11
-rw-r--r--meta/recipes-core/systemd/systemd/0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch54
-rw-r--r--meta/recipes-core/systemd/systemd/0005-rules-watch-metadata-changes-in-ide-devices.patch16
-rw-r--r--meta/recipes-core/systemd/systemd/0005-src-basic-missing.h-check-for-missing-strndupa.patch139
-rw-r--r--meta/recipes-core/systemd/systemd/0006-Include-netinet-if_ether.h.patch195
-rw-r--r--meta/recipes-core/systemd/systemd/0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch26
-rw-r--r--meta/recipes-core/systemd/systemd/0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch12
-rw-r--r--meta/recipes-core/systemd/systemd/0023-socket-util.h-include-string.h.patch30
-rw-r--r--meta/recipes-core/systemd/systemd/0025-fs-utilh-add-missing-sys-stat-include.patch30
-rw-r--r--meta/recipes-core/systemd/systemd_243.bb (renamed from meta/recipes-core/systemd/systemd_242.bb)25
-rw-r--r--meta/recipes-core/udev/udev-extraconf/mount.sh6
-rw-r--r--meta/recipes-core/util-linux/util-linux/0001-lsblk-force-to-print-PKNAME-for-partition.patch36
-rw-r--r--meta/recipes-core/util-linux/util-linux_2.34.bb1
-rw-r--r--meta/recipes-devtools/apt/apt.inc3
-rw-r--r--meta/recipes-devtools/binutils/binutils-2.32.inc9
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch99
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch51
-rw-r--r--meta/recipes-devtools/bison/bison_3.4.2.bb (renamed from meta/recipes-devtools/bison/bison_3.4.1.bb)4
-rw-r--r--meta/recipes-devtools/btrfs-tools/btrfs-tools_5.3.bb (renamed from meta/recipes-devtools/btrfs-tools/btrfs-tools_5.2.1.bb)2
-rw-r--r--meta/recipes-devtools/cmake/cmake-native_3.15.3.bb (renamed from meta/recipes-devtools/cmake/cmake-native_3.15.2.bb)0
-rw-r--r--meta/recipes-devtools/cmake/cmake.inc10
-rw-r--r--meta/recipes-devtools/cmake/cmake_3.15.3.bb (renamed from meta/recipes-devtools/cmake/cmake_3.15.2.bb)0
-rw-r--r--meta/recipes-devtools/createrepo-c/createrepo-c_0.15.1.bb (renamed from meta/recipes-devtools/createrepo-c/createrepo-c_0.15.0.bb)6
-rw-r--r--meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb2
-rw-r--r--meta/recipes-devtools/distcc/distcc_3.3.3.bb38
-rw-r--r--meta/recipes-devtools/distcc/files/default2
-rw-r--r--meta/recipes-devtools/distcc/files/distccmon-gnome.desktop12
-rw-r--r--meta/recipes-devtools/distcc/files/fix-gnome.patch122
-rw-r--r--meta/recipes-devtools/distcc/files/separatebuilddir.patch42
-rw-r--r--meta/recipes-devtools/dnf/dnf_4.2.2.bb5
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.4.bb (renamed from meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb)2
-rw-r--r--meta/recipes-devtools/elfutils/elfutils_0.177.bb (renamed from meta/recipes-devtools/elfutils/elfutils_0.176.bb)22
-rw-r--r--meta/recipes-devtools/elfutils/files/0001-ppc_initreg.c-Incliude-asm-ptrace.h-for-pt_regs-defi.patch32
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/0001-Ignore-differences-between-mips-machine-identifiers.patch12
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/0001-fix-compile-failure-with-debian-patches.patch48
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch13
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/0003-Add-mips-n64-relocation-format-hack.patch60
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/arm_backend.diff39
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/disable_werror.patch19
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/hppa_backend.diff57
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/hurd_path.patch17
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/ignore_strmerge.diff15
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/kfreebsd_path.patch13
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/mips_backend.diff42
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/mips_cfi.patch131
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/mips_readelf_w.patch22
-rw-r--r--meta/recipes-devtools/elfutils/files/debian/testsuite-ignore-elflint.diff20
-rw-r--r--meta/recipes-devtools/elfutils/files/musl-libs.patch39
-rw-r--r--meta/recipes-devtools/elfutils/files/musl-obstack-fts.patch29
-rw-r--r--meta/recipes-devtools/elfutils/files/musl-utils.patch37
-rw-r--r--meta/recipes-devtools/expect/expect/0001-exp_main_sub.c-Use-PATH_MAX-for-path.patch55
-rw-r--r--meta/recipes-devtools/expect/expect_5.45.4.bb7
-rw-r--r--meta/recipes-devtools/file/file/CVE-2019-18218.patch55
-rw-r--r--meta/recipes-devtools/file/file_5.37.bb14
-rw-r--r--meta/recipes-devtools/flex/flex_2.6.0.bb2
-rw-r--r--meta/recipes-devtools/gcc/gcc-8.3/CVE-2019-14250.patch44
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2.inc3
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2/CVE-2019-15847_1.patch521
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2/CVE-2019-15847_2.patch77
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2/CVE-2019-15847_3.patch62
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross.inc10
-rw-r--r--meta/recipes-devtools/gdb/gdb-8.3.1.inc (renamed from meta/recipes-devtools/gdb/gdb-8.3.inc)5
-rw-r--r--meta/recipes-devtools/gdb/gdb-cross-canadian_8.3.1.bb (renamed from meta/recipes-devtools/gdb/gdb-cross-canadian_8.3.bb)0
-rw-r--r--meta/recipes-devtools/gdb/gdb-cross_8.3.1.bb (renamed from meta/recipes-devtools/gdb/gdb-cross_8.3.bb)0
-rw-r--r--meta/recipes-devtools/gdb/gdb.inc1
-rw-r--r--meta/recipes-devtools/gdb/gdb/CVE-2017-9778.patch98
-rw-r--r--meta/recipes-devtools/gdb/gdb_8.3.1.bb (renamed from meta/recipes-devtools/gdb/gdb_8.3.bb)0
-rw-r--r--meta/recipes-devtools/git/git.inc18
-rw-r--r--meta/recipes-devtools/git/git_2.23.0.bb11
-rw-r--r--meta/recipes-devtools/git/git_2.24.0.bb11
-rw-r--r--meta/recipes-devtools/go/go-1.12.inc1
-rw-r--r--meta/recipes-devtools/go/go-1.12/0001-release-branch.go1.12-security-net-textproto-don-t-n.patch163
-rw-r--r--meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb1
-rw-r--r--meta/recipes-devtools/json-c/json-c_0.13.1.bb18
-rw-r--r--meta/recipes-devtools/libdnf/libdnf_0.28.1.bb2
-rw-r--r--meta/recipes-devtools/libmodulemd/libmodulemd-v1/0001-spec_tmpl.sh-use-bin-sh-not-usr-bin-sh.patch (renamed from meta/recipes-devtools/libmodulemd/libmodulemd/0001-spec_tmpl.sh-use-bin-sh-not-usr-bin-sh.patch)6
-rw-r--r--meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb (renamed from meta/recipes-devtools/libmodulemd/libmodulemd_git.bb)11
-rw-r--r--meta/recipes-devtools/libmodulemd/libmodulemd/0001-v1-meson.build-explicitly-specify-the-v1-library-in-.patch28
-rw-r--r--meta/recipes-devtools/libmodulemd/libmodulemd/0002-modulemd-v1-meson.build-do-not-generate-gir-or-gtkdo.patch71
-rw-r--r--meta/recipes-devtools/librepo/librepo_1.10.6.bb (renamed from meta/recipes-devtools/librepo/librepo_1.10.5.bb)2
-rw-r--r--meta/recipes-devtools/llvm/llvm/0006-llvm-TargetLibraryInfo-Undefine-libc-functions-if-th.patch12
-rw-r--r--meta/recipes-devtools/llvm/llvm/0007-llvm-allow-env-override-of-exe-path.patch11
-rw-r--r--meta/recipes-devtools/llvm/llvm_git.bb25
-rw-r--r--meta/recipes-devtools/makedevs/makedevs/makedevs.c10
-rw-r--r--meta/recipes-devtools/meson/meson.inc13
-rw-r--r--meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch8
-rw-r--r--meta/recipes-devtools/meson/meson/0001-environment.py-detect-windows-also-if-the-system-str.patch29
-rw-r--r--meta/recipes-devtools/meson/meson/0001-gtkdoc-fix-issues-that-arise-when-cross-compiling.patch8
-rw-r--r--meta/recipes-devtools/meson/meson/0001-mesonbuild-environment.py-do-not-determine-whether-a.patch28
-rw-r--r--meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch4
-rw-r--r--meta/recipes-devtools/meson/meson/0003-native_bindir.patch20
-rw-r--r--meta/recipes-devtools/meson/meson/dbc9e971bd320f3df15c1ee74f54858e6792b183.patch95
-rw-r--r--meta/recipes-devtools/meson/meson/vala-cross-compile.patch50
-rw-r--r--meta/recipes-devtools/meson/meson_0.52.0.bb (renamed from meta/recipes-devtools/meson/meson_0.51.2.bb)1
-rw-r--r--meta/recipes-devtools/meson/nativesdk-meson_0.52.0.bb (renamed from meta/recipes-devtools/meson/nativesdk-meson_0.51.2.bb)0
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils/0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch44
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils_0.4.1.bb1
-rw-r--r--meta/recipes-devtools/opkg/opkg_0.4.1.bb4
-rw-r--r--meta/recipes-devtools/patch/patch/0001-Invoke-ed-directly-instead-of-using-the-shell.patch4
-rw-r--r--meta/recipes-devtools/perl/perl_5.30.0.bb23
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-Add-statx.patch106
-rw-r--r--meta/recipes-devtools/pseudo/pseudo_git.bb1
-rw-r--r--meta/recipes-devtools/python-numpy/files/aarch64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/aarch64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/arm/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/arm/numpyconfig.h17
-rw-r--r--meta/recipes-devtools/python-numpy/files/armeb/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/armeb/numpyconfig.h17
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32eb/_numpyconfig.h31
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32el/_numpyconfig.h31
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32el/config.h138
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64eb/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64el/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64el/config.h138
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32eb/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32el/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32el/numpyconfig.h18
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/riscv64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/riscv64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86-64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86-64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86/config.h108
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86/numpyconfig.h24
-rw-r--r--meta/recipes-devtools/python-numpy/python-numpy.inc68
-rw-r--r--meta/recipes-devtools/python/python-async.inc2
-rw-r--r--meta/recipes-devtools/python/python-native/debug.patch32
-rw-r--r--meta/recipes-devtools/python/python-native_2.7.16.bb1
-rw-r--r--meta/recipes-devtools/python/python-setuptools.inc4
-rw-r--r--meta/recipes-devtools/python/python-setuptools_41.4.0.bb (renamed from meta/recipes-devtools/python/python-setuptools_41.2.0.bb)0
-rw-r--r--meta/recipes-devtools/python/python-smmap.inc2
-rw-r--r--meta/recipes-devtools/python/python/0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch90
-rw-r--r--meta/recipes-devtools/python/python/0001-2.7-bpo-38243-Escape-the-server-title-of-DocXMLRPCSe.patch101
-rw-r--r--meta/recipes-devtools/python/python/bpo-36742-cve-2019-10160.patch81
-rw-r--r--meta/recipes-devtools/python/python3-dbus_1.2.12.bb (renamed from meta/recipes-devtools/python/python3-dbus_1.2.10.bb)8
-rw-r--r--meta/recipes-devtools/python/python3-pip_19.3.1.bb (renamed from meta/recipes-devtools/python/python3-pip_19.2.3.bb)5
-rw-r--r--meta/recipes-devtools/python/python3-pygobject_3.34.0.bb (renamed from meta/recipes-devtools/python/python3-pygobject_3.32.2.bb)4
-rw-r--r--meta/recipes-devtools/python/python3-setuptools_41.4.0.bb (renamed from meta/recipes-devtools/python/python3-setuptools_41.2.0.bb)0
-rw-r--r--meta/recipes-devtools/python/python3-subunit_1.3.0.bb2
-rw-r--r--meta/recipes-devtools/python/python3/0001-Do-not-hardcode-lib-as-location-for-site-packages-an.patch2
-rw-r--r--meta/recipes-devtools/python/python3/0001-distutils-sysconfig-append-STAGING_LIBDIR-python-sys.patch10
-rw-r--r--meta/recipes-devtools/python/python3/0001-main.c-if-OEPYTHON3HOME-is-set-use-instead-of-PYTHON.patch10
-rw-r--r--meta/recipes-devtools/python/python3/0001-python3-use-cc_basename-to-replace-CC-for-checking-c.patch2
-rw-r--r--meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch31
-rw-r--r--meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch28
-rw-r--r--meta/recipes-devtools/python/python3/python3-manifest.json26
-rw-r--r--meta/recipes-devtools/python/python3_3.7.5.bb (renamed from meta/recipes-devtools/python/python3_3.7.4.bb)40
-rw-r--r--meta/recipes-devtools/python/python_2.7.16.bb5
-rw-r--r--meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb2
-rw-r--r--meta/recipes-devtools/qemu/qemu-helper/tunctl.c16
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc9
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2019-12068.patch108
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2019-15890.patch48
-rw-r--r--meta/recipes-devtools/quilt/quilt.inc3
-rw-r--r--meta/recipes-devtools/ruby/ruby.inc8
-rw-r--r--meta/recipes-devtools/ruby/ruby/0001-extmk-fix-cross-compilation-of-external-gems.patch34
-rw-r--r--meta/recipes-devtools/ruby/ruby/0002-Obey-LDFLAGS-for-the-link-of-libruby.patch22
-rw-r--r--meta/recipes-devtools/ruby/ruby/0003-configure.ac-check-finite-isinf-isnan-as-macros-firs.patch103
-rw-r--r--meta/recipes-devtools/ruby/ruby/extmk.patch16
-rw-r--r--meta/recipes-devtools/ruby/ruby/ruby-CVE-2017-9226.patch32
-rw-r--r--meta/recipes-devtools/ruby/ruby/ruby-CVE-2017-9228.patch34
-rw-r--r--meta/recipes-devtools/ruby/ruby_2.6.5.bb (renamed from meta/recipes-devtools/ruby/ruby_2.5.5.bb)35
-rw-r--r--meta/recipes-devtools/squashfs-tools/squashfs-tools/squashfs-tools-4.3-sysmacros.patch32
-rw-r--r--meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb16
-rw-r--r--meta/recipes-devtools/strace/strace/0001-Fix-build-when-using-non-glibc-libc-implementation-o.patch31
-rw-r--r--meta/recipes-devtools/strace/strace/disable-git-version-gen.patch10
-rwxr-xr-xmeta/recipes-devtools/strace/strace/run-ptest1
-rw-r--r--meta/recipes-devtools/strace/strace/uintptr_t.patch17
-rw-r--r--meta/recipes-devtools/strace/strace_5.3.bb (renamed from meta/recipes-devtools/strace/strace_5.2.bb)6
-rw-r--r--meta/recipes-devtools/swig/swig.inc3
-rw-r--r--meta/recipes-devtools/unfs3/unfs3/0001-Add-listen-action-for-a-tcp-socket.patch54
-rw-r--r--meta/recipes-devtools/unfs3/unfs3_git.bb4
-rw-r--r--meta/recipes-devtools/vala/vala_0.46.3.bb (renamed from meta/recipes-devtools/vala/vala_0.44.7.bb)4
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/0001-adjust-path-filter-for-2-memcheck-tests.patch40
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64236
-rwxr-xr-xmeta/recipes-devtools/valgrind/valgrind/run-ptest43
-rw-r--r--meta/recipes-devtools/valgrind/valgrind_3.15.0.bb54
-rw-r--r--meta/recipes-extended/acpica/acpica_20190816.bb (renamed from meta/recipes-extended/acpica/acpica_20190509.bb)4
-rw-r--r--meta/recipes-extended/cups/cups.inc4
-rw-r--r--meta/recipes-extended/ethtool/ethtool/avoid_parallel_tests.patch2
-rw-r--r--meta/recipes-extended/ethtool/ethtool_5.3.bb (renamed from meta/recipes-extended/ethtool/ethtool_5.2.bb)6
-rw-r--r--meta/recipes-extended/gawk/gawk_5.0.1.bb4
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2019-14811-0001.patch68
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2019-14817-0001.patch270
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2019-14817-0002.patch236
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript_9.27.bb7
-rw-r--r--meta/recipes-extended/iptables/iptables/iptables.rules (renamed from meta/recipes-core/base-files/base-files/usbd)0
-rw-r--r--meta/recipes-extended/iptables/iptables/iptables.service13
-rw-r--r--meta/recipes-extended/iptables/iptables_1.8.3.bb17
-rw-r--r--meta/recipes-extended/libarchive/libarchive_3.4.0.bb2
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc/musl.patch18
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc_1.1.4.bb2
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd_1.4.54.bb5
-rw-r--r--meta/recipes-extended/ltp/ltp/0001-cve-2017-17052-Avoid-unsafe-exits-in-threads.patch64
-rw-r--r--meta/recipes-extended/ltp/ltp/0001-overcommit_memory-update-for-mm-fix-false-positive-O.patch57
-rw-r--r--meta/recipes-extended/ltp/ltp_20190517.bb2
-rw-r--r--meta/recipes-extended/mdadm/mdadm_4.1.bb5
-rw-r--r--meta/recipes-extended/msmtp/msmtp_1.8.6.bb (renamed from meta/recipes-extended/msmtp/msmtp_1.8.5.bb)4
-rw-r--r--meta/recipes-extended/net-tools/net-tools_1.60-26.bb2
-rw-r--r--meta/recipes-extended/pam/libpam_1.3.1.bb2
-rw-r--r--meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb2
-rw-r--r--meta/recipes-extended/procps/procps_3.3.15.bb9
-rw-r--r--meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb2
-rw-r--r--meta/recipes-extended/screen/screen/0001-configure.ac-fix-configure-failed-while-build-dir-ha.patch109
-rw-r--r--meta/recipes-extended/screen/screen/Avoid-mis-identifying-systems-as-SVR4.patch57
-rw-r--r--meta/recipes-extended/screen/screen/Provide-cross-compile-alternatives-for-AC_TRY_RUN.patch137
-rw-r--r--meta/recipes-extended/screen/screen/Remove-redundant-compiler-sanity-checks.patch65
-rw-r--r--meta/recipes-extended/screen/screen/Skip-host-file-system-checks-when-cross-compiling.patch135
-rw-r--r--meta/recipes-extended/screen/screen_4.7.0.bb (renamed from meta/recipes-extended/screen/screen_4.6.2.bb)12
-rw-r--r--meta/recipes-extended/shadow/shadow-sysroot_4.6.bb6
-rw-r--r--meta/recipes-extended/shadow/shadow.inc10
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng/0001-bash-completion-remove-the-shebang-at-the-start.patch23
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng_0.10.08.bb (renamed from meta/recipes-extended/stress-ng/stress-ng_0.10.00.bb)5
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2019-14287-1.patch178
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2019-14287-2.patch112
-rw-r--r--meta/recipes-extended/sudo/sudo_1.8.27.bb4
-rw-r--r--meta/recipes-extended/sysstat/sysstat/0001-Fix-232-Memory-corruption-bug-due-to-Integer-Overflo.patch46
-rw-r--r--meta/recipes-extended/sysstat/sysstat_12.1.6.bb4
-rw-r--r--meta/recipes-extended/tcp-wrappers/tcp-wrappers_7.6.bb2
-rw-r--r--meta/recipes-extended/timezone/timezone.inc12
-rw-r--r--meta/recipes-extended/timezone/tzdata.bb5
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2019-13232_p1.patch33
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2019-13232_p2.patch356
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2019-13232_p3.patch121
-rw-r--r--meta/recipes-extended/unzip/unzip_6.0.bb3
-rw-r--r--meta/recipes-extended/watchdog/watchdog/0001-watchdog-remove-interdependencies-of-watchdog-and-wd.patch2
-rw-r--r--meta/recipes-extended/wget/wget.inc5
-rw-r--r--meta/recipes-gnome/epiphany/epiphany_3.34.1.bb (renamed from meta/recipes-gnome/epiphany/epiphany_3.32.4.bb)5
-rw-r--r--meta/recipes-gnome/gcr/gcr_3.34.0.bb (renamed from meta/recipes-gnome/gcr/gcr_3.28.1.bb)15
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.38.2.bb (renamed from meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.38.1.bb)11
-rw-r--r--meta/recipes-gnome/gnome/adwaita-icon-theme/0001-Don-t-use-AC_CANONICAL_HOST.patch2
-rw-r--r--meta/recipes-gnome/gnome/adwaita-icon-theme/0001-Run-installation-commands-as-shell-jobs.patch12
-rw-r--r--meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.0.bb (renamed from meta/recipes-gnome/gnome/adwaita-icon-theme_3.32.0.bb)8
-rw-r--r--meta/recipes-gnome/gnome/gconf_3.2.6.bb3
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection/0001-Port-cross-compilation-support-to-meson.patch20
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection/0001-Relocate-the-repository-directory-for-native-builds.patch6
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection/0001-Revert-an-incomplete-upstream-attempt-at-cross-compi.patch48
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection/0002-configure.ac-add-host-gi-gi-cross-wrapper-gi-ldd-wra.patch201
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection/0005-Prefix-pkg-config-paths-with-PKG_CONFIG_SYSROOT_DIR-.patch61
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb (renamed from meta/recipes-gnome/gobject-introspection/gobject-introspection_1.60.2.bb)16
-rw-r--r--meta/recipes-gnome/gsettings-desktop-schemas/gsettings-desktop-schemas_3.32.0.bb16
-rw-r--r--meta/recipes-gnome/gsettings-desktop-schemas/gsettings-desktop-schemas_3.34.0.bb18
-rw-r--r--meta/recipes-gnome/gtk+/gtk+3.inc2
-rw-r--r--meta/recipes-gnome/gtk-doc/files/pkg-config-native.patch2
-rw-r--r--meta/recipes-gnome/gtk-doc/gtk-doc_1.32.bb (renamed from meta/recipes-gnome/gtk-doc/gtk-doc_1.31.bb)4
-rw-r--r--meta/recipes-gnome/hicolor-icon-theme/hicolor-icon-theme_0.17.bb4
-rw-r--r--meta/recipes-gnome/json-glib/json-glib_1.4.4.bb3
-rw-r--r--meta/recipes-gnome/libdazzle/libdazzle_3.32.3.bb15
-rw-r--r--meta/recipes-gnome/libdazzle/libdazzle_3.34.1.bb20
-rw-r--r--meta/recipes-gnome/libgudev/libgudev_233.bb5
-rw-r--r--meta/recipes-gnome/libnotify/libnotify_0.7.8.bb3
-rw-r--r--meta/recipes-gnome/librsvg/librsvg_2.40.20.bb8
-rw-r--r--meta/recipes-gnome/libsecret/libsecret_0.19.1.bb (renamed from meta/recipes-gnome/libsecret/libsecret_0.19.0.bb)9
-rw-r--r--meta/recipes-graphics/cogl/cogl-1.0.inc4
-rw-r--r--meta/recipes-graphics/drm/libdrm_2.4.100.bb (renamed from meta/recipes-graphics/drm/libdrm_2.4.99.bb)4
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz_2.6.1.bb23
-rw-r--r--meta/recipes-graphics/libepoxy/libepoxy_1.5.3.bb2
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/0001-Fixed-bug-4538-validate-image-size-when-loading-BMP-.patch34
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/0002-Fixed-bug-4797-SDL-fails-to-compile-with-Mesa-Master.patch41
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2_2.0.10.bb2
-rw-r--r--meta/recipes-graphics/mesa/files/0001-meson.build-check-for-all-linux-host_os-combinations.patch37
-rw-r--r--meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-GLX-optional-again.patch12
-rw-r--r--meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch12
-rw-r--r--meta/recipes-graphics/mesa/mesa-gl_19.1.6.bb (renamed from meta/recipes-graphics/mesa/mesa-gl_19.1.1.bb)0
-rw-r--r--meta/recipes-graphics/mesa/mesa.inc7
-rw-r--r--meta/recipes-graphics/mesa/mesa_19.1.6.bb (renamed from meta/recipes-graphics/mesa/mesa_19.1.1.bb)4
-rw-r--r--meta/recipes-graphics/pango/pango/0001-Skip-thai-break-tests-without-libthai.patch36
-rw-r--r--meta/recipes-graphics/pango/pango_1.44.6.bb5
-rw-r--r--meta/recipes-graphics/piglit/piglit_git.bb2
-rw-r--r--meta/recipes-graphics/wayland/libinput_1.14.1.bb (renamed from meta/recipes-graphics/wayland/libinput_1.14.0.bb)9
-rw-r--r--meta/recipes-graphics/wayland/weston-conf.bb35
-rw-r--r--meta/recipes-graphics/wayland/weston-init.bb28
-rw-r--r--meta/recipes-graphics/wayland/weston-init/71-weston-drm.rules2
-rw-r--r--meta/recipes-graphics/wayland/weston-init/qemux86-64/weston.ini2
-rw-r--r--meta/recipes-graphics/wayland/weston-init/qemux86/weston.ini2
-rw-r--r--meta/recipes-graphics/wayland/weston-init/weston.ini74
-rw-r--r--meta/recipes-graphics/wayland/weston-init/weston.service13
-rw-r--r--meta/recipes-graphics/wayland/weston-init/weston@.service37
-rw-r--r--meta/recipes-graphics/wayland/weston_7.0.0.bb6
-rw-r--r--meta/recipes-graphics/xorg-font/font-util_1.3.2.bb (renamed from meta/recipes-graphics/xorg-font/font-util_1.3.1.bb)4
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/no-host-libtool.patch45
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/no-host-x.patch40
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11_1.6.8.bb7
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb (renamed from meta/recipes-graphics/xorg-lib/libx11.inc)11
-rw-r--r--meta/recipes-graphics/xorg-lib/libxfont2_2.0.4.bb (renamed from meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb)4
-rw-r--r--meta/recipes-graphics/xorg-lib/libxpm_3.5.12.bb2
-rw-r--r--meta/recipes-graphics/xorg-lib/libxvmc_1.0.12.bb (renamed from meta/recipes-graphics/xorg-lib/libxvmc_1.0.11.bb)4
-rw-r--r--meta/recipes-graphics/xorg-proto/xorgproto_2019.2.bb (renamed from meta/recipes-graphics/xorg-proto/xorgproto_2019.1.bb)6
-rw-r--r--meta/recipes-kernel/kexec/kexec-tools/0006-kexec-arm-undefine-__NR_kexec_file_load-for-arm.patch40
-rw-r--r--meta/recipes-kernel/kexec/kexec-tools_2.0.19.bb1
-rw-r--r--meta/recipes-kernel/linux-firmware/linux-firmware_20190815.bb18
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_4.19.bb20
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.2.bb22
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules_2.10.11.bb (renamed from meta/recipes-kernel/lttng/lttng-modules_2.10.10.bb)5
-rw-r--r--meta/recipes-kernel/lttng/lttng-ust/0001-Add-config-time-check-for-new-gettid-API.patch57
-rw-r--r--meta/recipes-kernel/lttng/lttng-ust/0001-python-lttngust-Makefile.am-Add-install-lib-to-setup.patch31
-rw-r--r--meta/recipes-kernel/lttng/lttng-ust_2.10.5.bb (renamed from meta/recipes-kernel/lttng/lttng-ust_2.10.4.bb)6
-rw-r--r--meta/recipes-kernel/perf/perf.bb9
-rw-r--r--meta/recipes-kernel/systemtap/systemtap_git.bb11
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.1.bb (renamed from meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.bb)18
-rw-r--r--meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb (renamed from meta/recipes-multimedia/gstreamer/gst-examples_git.bb)1
-rw-r--r--meta/recipes-multimedia/gstreamer/gst-validate_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gst-validate_1.16.0.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.0.bb)11
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/0001-scaletempo-Advertise-interleaved-layout-in-caps-temp.patch37
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/headerfix.patch43
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.0.bb)8
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.0.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.1.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.0.bb)6
-rw-r--r--meta/recipes-multimedia/libogg/libogg_1.3.4.bb2
-rw-r--r--meta/recipes-multimedia/libpng/libpng_1.6.37.bb3
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb4
-rw-r--r--meta/recipes-multimedia/libtheora/libtheora_1.1.1.bb2
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2019-14973.patch415
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2019-17546.patch103
-rw-r--r--meta/recipes-multimedia/libtiff/tiff_4.0.10.bb5
-rw-r--r--meta/recipes-multimedia/libvorbis/libvorbis_1.3.6.bb2
-rw-r--r--meta/recipes-multimedia/mpg123/mpg123_1.25.12.bb (renamed from meta/recipes-multimedia/mpg123/mpg123_1.25.11.bb)4
-rw-r--r--meta/recipes-multimedia/speex/speex_1.2.0.bb2
-rw-r--r--meta/recipes-multimedia/speex/speexdsp_1.2rc3.bb2
-rw-r--r--meta/recipes-multimedia/webp/libwebp_1.0.3.bb7
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/0001-WebKitMacros-Append-to-I-and-not-to-isystem.patch125
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/0001-gstreamer-add-a-missing-format-string.patch24
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/detect-gstreamer-gl.patch20
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/narrowing.patch31
-rw-r--r--meta/recipes-sato/webkit/webkitgtk_2.26.1.bb (renamed from meta/recipes-sato/webkit/webkitgtk_2.24.4.bb)9
-rw-r--r--meta/recipes-support/apr/apr/0001-build-buildcheck.sh-improve-libtool-detection.patch32
-rw-r--r--meta/recipes-support/apr/apr/libtoolize_check.patch28
-rw-r--r--meta/recipes-support/apr/apr_1.7.0.bb5
-rw-r--r--meta/recipes-support/aspell/aspell_0.60.8.bb (renamed from meta/recipes-support/aspell/aspell_0.60.7.bb)6
-rw-r--r--meta/recipes-support/curl/curl_7.66.0.bb (renamed from meta/recipes-support/curl/curl_7.65.3.bb)4
-rw-r--r--meta/recipes-support/fribidi/fribidi/meson.patch44
-rw-r--r--meta/recipes-support/fribidi/fribidi_1.0.7.bb (renamed from meta/recipes-support/fribidi/fribidi_1.0.5.bb)6
-rw-r--r--meta/recipes-support/gnupg/gnupg_2.2.17.bb3
-rw-r--r--meta/recipes-support/gnutls/libtasn1/fix-gtkdoc.patch38
-rw-r--r--meta/recipes-support/gnutls/libtasn1_4.14.bb1
-rw-r--r--meta/recipes-support/iso-codes/iso-codes_4.4.bb (renamed from meta/recipes-support/iso-codes/iso-codes_4.3.bb)2
-rw-r--r--meta/recipes-support/libcap-ng/libcap-ng-python_0.7.10.bb (renamed from meta/recipes-support/libcap-ng/libcap-ng-python_0.7.9.bb)0
-rw-r--r--meta/recipes-support/libcap-ng/libcap-ng.inc6
-rw-r--r--meta/recipes-support/libcap-ng/libcap-ng_0.7.10.bb (renamed from meta/recipes-support/libcap-ng/libcap-ng_0.7.9.bb)1
-rw-r--r--meta/recipes-support/libcap/libcap_2.27.bb2
-rw-r--r--meta/recipes-support/libcheck/libcheck_0.12.0.bb9
-rw-r--r--meta/recipes-support/libevent/libevent/run-ptest9
-rw-r--r--meta/recipes-support/libgcrypt/files/0001-Prefetch-GCM-look-up-tables.patch90
-rw-r--r--meta/recipes-support/libgcrypt/files/0001-libgcrypt-fix-m4-file-for-oe-core.patch (renamed from meta/recipes-support/libgcrypt/files/0001-Add-and-use-pkg-config-for-libgcrypt-instead-of-conf.patch)138
-rw-r--r--meta/recipes-support/libgcrypt/files/0002-AES-move-look-up-tables-to-.data-section-and-unshare.patch332
-rw-r--r--meta/recipes-support/libgcrypt/files/0003-GCM-move-look-up-table-to-.data-section-and-unshare-.patch178
-rw-r--r--meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb (renamed from meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb)9
-rw-r--r--meta/recipes-support/libical/libical/0001-Use-our-hand-build-native-src-generator.patch33
-rw-r--r--meta/recipes-support/libical/libical_3.0.5.bb35
-rw-r--r--meta/recipes-support/libical/libical_3.0.6.bb43
-rw-r--r--meta/recipes-support/libksba/libksba_1.3.5.bb4
-rw-r--r--meta/recipes-support/libmpc/libmpc_1.1.0.bb2
-rw-r--r--meta/recipes-support/libpcre/libpcre2_10.33.bb2
-rw-r--r--meta/recipes-support/libpcre/libpcre_8.43.bb2
-rw-r--r--meta/recipes-support/libsoup/libsoup-2.4_2.66.4.bb (renamed from meta/recipes-support/libsoup/libsoup-2.4_2.66.2.bb)10
-rw-r--r--meta/recipes-support/libunwind/libunwind.inc1
-rw-r--r--meta/recipes-support/libxslt/files/CVE-2019-18197.patch33
-rw-r--r--meta/recipes-support/libxslt/libxslt_1.1.33.bb1
-rw-r--r--meta/recipes-support/lz4/lz4_1.9.2.bb3
-rw-r--r--meta/recipes-support/nspr/nspr/0004-Add-ARC-support.patch88
-rw-r--r--meta/recipes-support/nspr/nspr_4.23.bb (renamed from meta/recipes-support/nspr/nspr_4.21.bb)5
-rw-r--r--meta/recipes-support/p11-kit/p11-kit/0001-LINGUAS-drop-the-languages-for-which-upstream-does-n.patch29
-rw-r--r--meta/recipes-support/p11-kit/p11-kit_0.23.18.1.bb (renamed from meta/recipes-support/p11-kit/p11-kit_0.23.16.1.bb)8
-rw-r--r--meta/recipes-support/sqlite/sqlite3_3.30.0.bb (renamed from meta/recipes-support/sqlite/sqlite3_3.29.0.bb)4
-rw-r--r--meta/recipes-support/vte/vte/0001-Add-m4-vapigen.m4.patch119
-rw-r--r--meta/recipes-support/vte/vte/0001-Don-t-enable-stack-protection-by-default.patch29
-rw-r--r--meta/recipes-support/vte/vte/0002-Add-W_EXITCODE-macro-for-non-glibc-systems.patch (renamed from meta/recipes-support/vte/vte/0001-Add-W_EXITCODE-macro-for-non-glibc-systems.patch)0
-rw-r--r--meta/recipes-support/vte/vte_0.58.2.bb (renamed from meta/recipes-support/vte/vte_0.56.3.bb)25
-rwxr-xr-xscripts/gen-lockedsig-cache49
-rw-r--r--scripts/lib/devtool/deploy.py8
-rw-r--r--scripts/lib/devtool/standard.py39
-rw-r--r--scripts/lib/resulttool/log.py43
-rw-r--r--scripts/lib/resulttool/report.py44
-rw-r--r--scripts/lib/resulttool/resultutils.py49
-rw-r--r--scripts/lib/wic/engine.py2
-rw-r--r--scripts/lib/wic/help.py4
-rw-r--r--scripts/lib/wic/partition.py23
-rw-r--r--scripts/lib/wic/plugins/imager/direct.py3
-rw-r--r--scripts/lib/wic/plugins/source/rawcopy.py3
-rwxr-xr-xscripts/oe-git-proxy21
-rwxr-xr-xscripts/oe-pkgdata-util17
-rwxr-xr-xscripts/runqemu27
593 files changed, 12712 insertions, 7380 deletions
diff --git a/contrib/artwork/oe.svg b/contrib/artwork/oe.svg
new file mode 100644
index 0000000000..a3545cb136
--- /dev/null
+++ b/contrib/artwork/oe.svg
@@ -0,0 +1,80 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 13.0.2, SVG Export Plug-In . SVG Version: 6.00 Build 14948) -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ width="209.104px" height="167.858px" viewBox="0 0 209.104 167.858" enable-background="new 0 0 209.104 167.858"
+ xml:space="preserve">
+<g>
+ <g>
+ <path fill="#00BCE4" d="M74.145,145.897c-0.568,0.568-0.841,1.228-0.841,2.03v0.339h5.744v-0.339c0-1.601-1.273-2.87-2.872-2.872
+ C75.372,145.056,74.713,145.33,74.145,145.897z M73.304,154.262c0,0.876,0.729,1.605,1.605,1.605h6.334
+ c1.357,0,2.195,0.838,2.195,2.194c0,1.357-0.838,2.195-2.195,2.195h-6.334c-1.657,0-3.101-0.58-4.265-1.73
+ c-1.15-1.163-1.731-2.607-1.731-4.265v-6.334c0-4.041,3.221-7.261,7.262-7.263c2.019,0,3.752,0.705,5.148,2.115
+ c1.41,1.396,2.114,3.13,2.114,5.147v4.729H73.304V154.262z"/>
+ <path fill="#00BCE4" d="M99.738,140.665c1.662,0,3.094,0.584,4.246,1.75c1.166,1.153,1.749,2.584,1.75,4.246v11.4
+ c0,1.357-0.838,2.195-2.195,2.195s-2.195-0.838-2.195-2.195v-11.4c0-0.922-0.684-1.604-1.605-1.605h-1.604v13.006
+ c0,1.357-0.838,2.195-2.195,2.195s-2.195-0.838-2.195-2.195v-13.006h-3.21v13.006c0,1.357-0.838,2.195-2.195,2.195
+ s-2.195-0.838-2.195-2.195V142.86c0-1.357,0.838-2.195,2.195-2.195H99.738z"/>
+ <path fill="#00BCE4" d="M123.308,154.262c0,3.37-2.625,5.995-5.995,5.995h-6.334c-1.356,0-2.194-0.838-2.194-2.195V135.26
+ c0-1.356,0.838-2.195,2.194-2.195c1.357,0,2.195,0.839,2.195,2.195v5.405h4.139c3.472,0,5.993,2.658,5.995,5.995V154.262z
+ M113.174,145.056v10.812h4.139c0.903,0,1.604-0.701,1.604-1.605v-7.601c0-0.927-0.678-1.604-1.605-1.605H113.174z"/>
+ <path fill="#00BCE4" d="M130.897,145.897c-0.568,0.568-0.842,1.228-0.842,2.03v0.339h5.744v-0.339c0-1.601-1.273-2.87-2.872-2.872
+ C132.124,145.056,131.466,145.33,130.897,145.897z M130.056,154.262c0,0.876,0.729,1.605,1.605,1.605h6.334
+ c1.357,0,2.195,0.838,2.195,2.194c0,1.357-0.838,2.195-2.195,2.195h-6.334c-1.657,0-3.102-0.58-4.266-1.73
+ c-1.148-1.163-1.73-2.607-1.73-4.265v-6.334c0-4.041,3.221-7.261,7.263-7.263c2.019,0,3.752,0.705,5.147,2.115
+ c1.41,1.396,2.115,3.13,2.115,5.147v4.729h-10.135V154.262z"/>
+ <path fill="#00BCE4" d="M143.688,143.646c1.129-1.972,2.906-2.98,5.201-2.981h4.139v-5.405c0-1.356,0.838-2.195,2.195-2.195
+ s2.195,0.839,2.195,2.195v22.802c0,1.357-0.838,2.195-2.195,2.195h-6.334c-1.657,0-3.102-0.58-4.266-1.73
+ c-1.149-1.163-1.73-2.607-1.73-4.265v-7.601C142.894,145.581,143.157,144.572,143.688,143.646z M147.741,145.532
+ c-0.307,0.309-0.457,0.665-0.457,1.129v7.601c0,0.878,0.727,1.605,1.605,1.605h4.139v-10.812h-4.139
+ C148.433,145.056,148.067,145.207,147.741,145.532z"/>
+ <path fill="#00BCE4" d="M160.917,143.646c1.129-1.972,2.905-2.98,5.201-2.981h4.139v-5.405c0-1.356,0.838-2.195,2.195-2.195
+ c1.356,0,2.195,0.839,2.195,2.195v22.802c0,1.357-0.839,2.195-2.195,2.195h-6.334c-1.657,0-3.102-0.58-4.266-1.73
+ c-1.149-1.163-1.73-2.607-1.73-4.265v-7.601C160.122,145.581,160.386,144.572,160.917,143.646z M164.97,145.532
+ c-0.308,0.309-0.457,0.665-0.457,1.129v7.601c0,0.878,0.727,1.605,1.605,1.605h4.139v-10.812h-4.139
+ C165.661,145.056,165.296,145.207,164.97,145.532z"/>
+ <path fill="#00BCE4" d="M182.583,145.897c-0.568,0.568-0.842,1.228-0.842,2.03v0.339h5.743l0.001-0.339
+ c0-1.601-1.273-2.87-2.873-2.872C183.81,145.056,183.15,145.33,182.583,145.897z M181.741,154.262
+ c0,0.876,0.729,1.605,1.605,1.605h6.333c1.357,0,2.195,0.838,2.195,2.194c0,1.357-0.838,2.195-2.195,2.195h-6.333
+ c-1.658,0-3.102-0.58-4.266-1.73c-1.149-1.163-1.73-2.607-1.73-4.265v-6.334c0-4.041,3.221-7.261,7.262-7.263
+ c2.019,0,3.752,0.705,5.148,2.115c1.41,1.396,2.114,3.13,2.114,5.147v4.729h-10.134V154.262z"/>
+ <path fill="#00BCE4" d="M195.374,143.646c1.129-1.972,2.905-2.98,5.2-2.981h4.139v-5.405c0-1.356,0.839-2.195,2.195-2.195
+ c1.357,0,2.195,0.839,2.195,2.195v22.802c0,1.357-0.838,2.195-2.195,2.195h-6.333c-1.658,0-3.102-0.58-4.266-1.73
+ c-1.149-1.163-1.73-2.607-1.73-4.265v-7.601C194.579,145.582,194.843,144.572,195.374,143.646z M199.427,145.532
+ c-0.308,0.309-0.457,0.665-0.457,1.129v7.601c0,0.878,0.727,1.605,1.605,1.605h4.138v-10.812h-4.138
+ C200.118,145.056,199.753,145.207,199.427,145.532z"/>
+ </g>
+ <g>
+ <path fill="#5C6F7B" d="M19.423,140.665h6.333c1.662,0,3.094,0.584,4.246,1.75c1.167,1.153,1.75,2.585,1.75,4.246v7.601
+ c0,1.651-0.587,3.082-1.748,4.244c-1.152,1.167-2.586,1.751-4.248,1.751h-4.138v5.406c0,1.357-0.838,2.195-2.195,2.195
+ s-2.195-0.839-2.195-2.195V142.86C17.228,141.503,18.066,140.665,19.423,140.665z M21.619,155.867h4.138
+ c0.903,0,1.605-0.701,1.605-1.605v-7.601c0-0.928-0.679-1.604-1.605-1.605h-4.138V155.867z"/>
+ <path fill="#5C6F7B" d="M39.688,145.897c-0.569,0.568-0.841,1.228-0.841,2.03v0.339h5.743v-0.339c0-1.601-1.272-2.87-2.872-2.872
+ C40.915,145.056,40.256,145.33,39.688,145.897z M38.847,154.262c0,0.876,0.729,1.605,1.605,1.605h6.333
+ c1.357,0,2.195,0.838,2.195,2.194c0,1.357-0.838,2.195-2.195,2.195h-6.333c-1.657,0-3.101-0.58-4.265-1.73
+ c-1.15-1.163-1.73-2.607-1.73-4.265v-6.334c0-4.041,3.22-7.261,7.262-7.263c2.018,0,3.752,0.705,5.147,2.115
+ c1.411,1.396,2.115,3.13,2.115,5.147v4.729H38.847V154.262z"/>
+ <path fill="#5C6F7B" d="M51.685,142.86c0-1.357,0.838-2.195,2.195-2.195h6.334c1.662,0,3.094,0.584,4.246,1.75
+ c1.167,1.153,1.749,2.584,1.75,4.246v11.4c0,1.357-0.838,2.195-2.195,2.195s-2.195-0.838-2.195-2.195v-11.4
+ c0-0.922-0.684-1.604-1.605-1.605h-4.138v13.006c0,1.357-0.838,2.195-2.195,2.195c-1.357,0-2.195-0.838-2.195-2.195V142.86z"/>
+ <path fill-rule="evenodd" clip-rule="evenodd" fill="#5C6F7B" d="M10.134,147.928c0-1.601-1.273-2.87-2.872-2.872
+ c-1.599,0-2.872,1.273-2.872,2.872v5.067c0,0.824,0.267,1.485,0.822,2.049c0.564,0.555,1.226,0.823,2.05,0.823
+ c1.601,0,2.871-1.273,2.872-2.873V147.928z M0,147.928c0-4.041,3.22-7.261,7.262-7.263c2.019,0,3.752,0.705,5.148,2.115
+ c1.411,1.396,2.114,3.13,2.114,5.147v5.067c0,4.041-3.222,7.262-7.262,7.262c-1.997,0-3.732-0.713-5.147-2.114
+ C0.713,156.727,0,154.991,0,152.995V147.928z"/>
+ </g>
+ <g>
+ <path fill-rule="evenodd" clip-rule="evenodd" fill="#5C6F7B" d="M25.428,79.124C25.428,35.424,60.853,0,104.552,0
+ s79.123,35.424,79.123,79.124c0,14.938-4.139,28.909-11.333,40.828h-26.366c10.352-10.5,16.738-24.918,16.738-40.828
+ c0-32.123-26.039-58.163-58.162-58.163s-58.163,26.04-58.163,58.163c0,15.91,6.388,30.328,16.738,40.828H36.761
+ C29.568,108.032,25.428,94.062,25.428,79.124z"/>
+ </g>
+ <g>
+ <path fill="#00BCE4" d="M94.122,79.124c0-2.882,1.168-5.491,3.059-7.379l17.436-17.494c-9.682-3.913-21.184-1.944-29.033,5.905
+ c-10.476,10.476-10.476,27.461,0,37.937c10.476,10.477,27.461,10.477,37.938,0c4.547-4.547,7.12-10.321,7.721-16.256l18.46-18.461
+ c5.838,16.774,2.056,36.151-11.345,49.553c-18.67,18.67-48.939,18.67-67.609,0c-18.67-18.67-18.67-48.938,0-67.608
+ c18.669-18.67,48.939-18.67,67.609,0c2.535,2.536,4.728,5.287,6.574,8.193l-32.986,32.997c-1.888,1.89-4.511,3.045-7.393,3.045
+ C98.791,89.554,94.122,84.884,94.122,79.124z"/>
+ </g>
+</g>
+</svg>
diff --git a/meta-selftest/conf/layer.conf b/meta-selftest/conf/layer.conf
index 080350a56d..08181a88cc 100644
--- a/meta-selftest/conf/layer.conf
+++ b/meta-selftest/conf/layer.conf
@@ -9,4 +9,4 @@ BBFILE_COLLECTIONS += "selftest"
BBFILE_PATTERN_selftest = "^${LAYERDIR}/"
BBFILE_PRIORITY_selftest = "5"
-LAYERSERIES_COMPAT_selftest = "warrior"
+LAYERSERIES_COMPAT_selftest = "zeus"
diff --git a/meta-selftest/lib/oeqa/runtime/cases/virgl.py b/meta-selftest/lib/oeqa/runtime/cases/virgl.py
index d301a19fa4..c0abfd1b16 100644
--- a/meta-selftest/lib/oeqa/runtime/cases/virgl.py
+++ b/meta-selftest/lib/oeqa/runtime/cases/virgl.py
@@ -13,11 +13,6 @@ class VirglTest(OERuntimeTestCase):
@OETestDepends(['virgl.VirglTest.test_kernel_driver'])
def test_kmscube(self):
-
- distro = oe.lsb.distro_identifier()
- if distro and distro == 'centos-7':
- self.skipTest('kmscube is not working when centos 7 is the host OS')
-
status, output = self.target.run('kmscube', timeout=30)
self.assertEqual(status, 0, "kmscube exited with non-zero status %d and output:\n%s" %(status, output))
self.assertIn('renderer: "virgl"', output, "kmscube does not seem to use virgl:\n%s" %(output))
diff --git a/meta-selftest/recipes-test/aspell/aspell_0.60.7.bbappend b/meta-selftest/recipes-test/aspell/aspell_0.60.8.bbappend
index 205720982c..205720982c 100644
--- a/meta-selftest/recipes-test/aspell/aspell_0.60.7.bbappend
+++ b/meta-selftest/recipes-test/aspell/aspell_0.60.8.bbappend
diff --git a/meta-selftest/recipes-test/multiconfig/multiconfig-test-parse.bb b/meta-selftest/recipes-test/multiconfig/multiconfig-test-parse.bb
new file mode 100644
index 0000000000..6236697453
--- /dev/null
+++ b/meta-selftest/recipes-test/multiconfig/multiconfig-test-parse.bb
@@ -0,0 +1,11 @@
+SUMMARY = "Test Multiconfig Parsing"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+do_showvar() {
+ bbplain "MCTESTVAR=${MCTESTVAR}"
+}
+addtask do_showvar
+
diff --git a/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb b/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
index 7c20d9a683..0cd0494da8 100644
--- a/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
+++ b/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
@@ -4,7 +4,7 @@ require recipeutils-test.inc
LICENSE = "Proprietary"
LIC_FILES_CHKSUM = "file://${WORKDIR}/somefile;md5=d41d8cd98f00b204e9800998ecf8427e"
-DEPENDS += "virtual/libx11"
+DEPENDS += "zlib"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-skeleton/conf/layer.conf b/meta-skeleton/conf/layer.conf
index 7d4ffb46ed..7d7381a700 100644
--- a/meta-skeleton/conf/layer.conf
+++ b/meta-skeleton/conf/layer.conf
@@ -14,4 +14,4 @@ LAYERVERSION_skeleton = "1"
LAYERDEPENDS_skeleton = "core"
-LAYERSERIES_COMPAT_skeleton = "warrior"
+LAYERSERIES_COMPAT_skeleton = "zeus"
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 13b05bb5f2..093e2d95af 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -221,9 +221,10 @@ python do_ar_patched() {
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
- ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ if not is_work_shared(d):
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ d.setVar('WORKDIR', ar_workdir)
bb.note('Archiving the patched source...')
- d.setVar('WORKDIR', ar_workdir)
create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 0c8a4b2862..1cea3a2213 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -32,9 +32,11 @@ def oe_import(d):
import oe.data
for toimport in oe.data.typed_value("OE_IMPORTS", d):
- imported = __import__(toimport)
- inject(toimport.split(".", 1)[0], imported)
-
+ try:
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
+ except AttributeError as e:
+ bb.error("Error importing OE modules: %s" % str(e))
return ""
# We need the oe module name space early (before INHERITs get added)
@@ -480,6 +482,7 @@ python () {
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_unpack', 'umask', '022')
d.setVarFlag('do_configure', 'umask', '022')
d.setVarFlag('do_compile', 'umask', '022')
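
The oe_import() hunk above wraps each OE_IMPORTS import in a try/except so a broken module is reported through bb.error instead of killing parsing with a bare traceback. A minimal standalone sketch of the same import-and-inject pattern, assuming ordinary Python modules in place of the real OE_IMPORTS list:

```python
# Sketch only: import a list of modules and inject the top-level package
# into builtins, reporting failures instead of aborting the whole loop.
import builtins

def import_and_inject(module_names):
    for name in module_names:
        try:
            imported = __import__(name)
            # Equivalent of inject(): make the top-level package globally visible.
            setattr(builtins, name.split(".", 1)[0], imported)
        except (ImportError, AttributeError) as e:
            # The class reports via bb.error(); plain print() stands in here.
            print("Error importing modules: %s" % str(e))

import_and_inject(["os.path", "json"])
```
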
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 2b317c832f..291f1e8d44 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -106,11 +106,12 @@ set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
# only search in the paths provided so cmake doesnt pick
# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+set( CMAKE_PROGRAM_PATH "/" )
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index a9a2cec68f..94f112c397 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -30,6 +30,7 @@
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
# - splash - bootup splash screen
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index c00d2910be..1c8b2223a2 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -208,19 +208,21 @@ def check_cves(d, patched_cves):
if cve in cve_whitelist:
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'whitelisted'
+ patched_cves.add(cve)
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
else:
to_append = False
if (operator_start == '=' and pv == version_start):
- cves_unpatched.append(cve)
+ to_append = True
else:
if operator_start:
try:
to_append_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
to_append_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
except:
- bb.note("%s: Failed to compare %s %s %s for %s" %
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
to_append_start = False
else:
@@ -231,7 +233,7 @@ def check_cves(d, patched_cves):
to_append_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
to_append_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
except:
- bb.note("%s: Failed to compare %s %s %s for %s" %
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
to_append_end = False
else:
@@ -243,8 +245,11 @@ def check_cves(d, patched_cves):
to_append = to_append_start or to_append_end
if to_append:
+ bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
cves_unpatched.append(cve)
- bb.debug(2, "%s-%s is not patched for %s" % (product, pv, cve))
+ else:
+ bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
+ patched_cves.add(cve)
conn.close()
return (list(patched_cves), cves_unpatched)
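
The check_cves() changes above make the outcome of each comparison explicit: whitelisted and non-vulnerable entries now land in patched_cves, failed version comparisons are raised from notes to warnings, and the '=' operator sets a flag instead of appending directly. A standalone sketch of the per-bound LooseVersion test with invented sample values; how the two results are combined depends on which bounds the CVE record provides:

```python
from distutils.version import LooseVersion

def check_bounds(pv, operator_start, version_start, operator_end, version_end):
    """Return (start_ok, end_ok) for PV against an NVD-style version range."""
    start_ok = end_ok = False
    if operator_start:
        try:
            start_ok = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start)) \
                    or (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
        except Exception:
            # Mirrors the class: a comparison failure is reported and treated as no match.
            print("Failed to compare %s %s %s" % (pv, operator_start, version_start))
    if operator_end:
        try:
            end_ok = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end)) \
                  or (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
        except Exception:
            print("Failed to compare %s %s %s" % (pv, operator_end, version_end))
    return start_ok, end_ok

print(check_bounds("1.2.10", ">=", "1.2.0", "<", "1.3.0"))  # (True, True)
```
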
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index a8110006fb..280d6009f3 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -97,17 +97,15 @@ python devtool_post_unpack() {
local_files = oe.recipeutils.get_recipe_local_files(d)
if is_kernel_yocto:
- for key in local_files.copy():
- if key.endswith('scc'):
- sccfile = open(local_files[key], 'r')
+ for key in [f for f in local_files if f.endswith('scc')]:
+ with open(local_files[key], 'r') as sccfile:
for l in sccfile:
line = l.split()
if line and line[0] in ('kconf', 'patch'):
cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
- if not cfg in local_files.values():
+ if cfg not in local_files.values():
local_files[line[-1]] = cfg
shutil.copy2(cfg, workdir)
- sccfile.close()
# Ignore local files with subdir={BP}
srcabspath = os.path.abspath(srcsubdir)
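
The devtool_post_unpack() rewrite above is a readability cleanup: a filtered key list and a with-block replace the copy()/manual close() dance around the .scc scan. Pulled out of the class, the scan looks roughly like this (inputs are invented; error handling is omitted):

```python
import os
import shutil

def collect_scc_fragments(local_files, workdir):
    """Scan .scc files for 'kconf'/'patch' lines and copy any referenced
    fragments into workdir, registering them in local_files."""
    for key in [f for f in local_files if f.endswith('scc')]:
        with open(local_files[key], 'r') as sccfile:
            for l in sccfile:
                line = l.split()
                if line and line[0] in ('kconf', 'patch'):
                    cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
                    if cfg not in local_files.values():
                        local_files[line[-1]] = cfg
                        shutil.copy2(cfg, workdir)
```
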
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 3618b99a86..ea59d02ed9 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -203,7 +203,7 @@ def srctree_hash_files(d, srcdir=None):
ret = " "
if git_dir is not None:
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
index f661a69f83..8b5ff20c72 100644
--- a/meta/classes/grub-efi-cfg.bbclass
+++ b/meta/classes/grub-efi-cfg.bbclass
@@ -23,7 +23,6 @@ GRUB_TIMEOUT ?= "10"
#FIXME: build this from the machine config
GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-EFIDIR = "/EFI/BOOT"
GRUB_ROOT ?= "${ROOT}"
APPEND ?= ""
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
index ec692f1646..8fc6999e52 100644
--- a/meta/classes/grub-efi.bbclass
+++ b/meta/classes/grub-efi.bbclass
@@ -1,46 +1,8 @@
inherit grub-efi-cfg
+require conf/image-uefi.conf
efi_populate() {
- # DEST must be the root of the image so that EFIDIR is not
- # nested under a top level directory.
- DEST=$1
-
- install -d ${DEST}${EFIDIR}
-
- GRUB_IMAGE="grub-efi-bootia32.efi"
- DEST_IMAGE="bootia32.efi"
- if [ -n "${MLPREFIX}" ]; then
- if [ "${TARGET_ARCH_MULTILIB_ORIGINAL}" = "x86_64" ]; then
- GRUB_IMAGE="grub-efi-bootx64.efi"
- DEST_IMAGE="bootx64.efi"
- fi
- else
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- GRUB_IMAGE="grub-efi-bootx64.efi"
- DEST_IMAGE="bootx64.efi"
- fi
- fi
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}/${DEST_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_IMAGE" >${DEST}/startup.nsh
+ efi_populate_common "$1" grub-efi
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- # Build a EFI directory to create efi.img
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index fc82f8de1a..bc3d6f4cc8 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -73,12 +73,15 @@ ICECC_ENV_DEBUG ??= ""
#
# libgcc-initial - fails with CPP sanity check error if host sysroot contains
# cross gcc built for another target tune/variant
+# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
+# pragma omp threadprivate(prng_state)
# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
# inline assembly
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
ICECC_SYSTEM_PACKAGE_BL += "\
libgcc-initial \
+ pixman \
systemtap \
target-sdk-provides-dummy \
"
@@ -135,6 +138,10 @@ def use_icecc(bb,d):
if icecc_is_cross_canadian(bb, d):
return "no"
+ if d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ # We don't have a compiler, so no icecc
+ return "no"
+
pn = d.getVar('PN')
bpn = d.getVar('BPN')
@@ -349,17 +356,6 @@ set_icecc_env() {
return
fi
- # Create symlinks to icecc in the recipe-sysroot directory
- mkdir -p ${ICE_PATH}
- if [ -n "${KERNEL_CC}" ]; then
- compilers="${@get_cross_kernel_cc(bb,d)}"
- else
- compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
- fi
- for compiler in $compilers; do
- ln -sf ${ICECC_BIN} ${ICE_PATH}/$compiler
- done
-
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
# cannot use icecc_get_and_check_tool here because it assumes as without target_sys prefix
@@ -378,6 +374,26 @@ set_icecc_env() {
return
fi
+ # Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
+ mkdir -p $ICE_PATH/symlinks
+ if [ -n "${KERNEL_CC}" ]; then
+ compilers="${@get_cross_kernel_cc(bb,d)}"
+ else
+ compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
+ fi
+ for compiler in $compilers; do
+ ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
+ rm -f $ICE_PATH/$compiler
+ cat <<-__EOF__ > $ICE_PATH/$compiler
+ #!/bin/sh -e
+ export ICECC_VERSION=$ICECC_VERSION
+ export ICECC_CC=$ICECC_CC
+ export ICECC_CXX=$ICECC_CXX
+ $ICE_PATH/symlinks/$compiler "\$@"
+ __EOF__
+ chmod 775 $ICE_PATH/$compiler
+ done
+
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
@@ -410,7 +426,6 @@ set_icecc_env() {
export CCACHE_PATH="$PATH"
export CCACHE_DISABLE="1"
- export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
bbnote "Using icecc path: $ICE_PATH"
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index af71be5093..54058b350d 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -37,7 +37,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index f4633da3d5..c2824395c9 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -124,7 +124,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 9b886d1380..9605ac2bae 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -34,7 +34,7 @@ ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
split-strip packages-list pkgv-undefined var-undefined \
version-going-backwards expanded-d invalid-chars \
license-checksum dev-elf file-rdeps configure-unsafe \
- configure-gettext \
+ configure-gettext perllocalpod \
"
# Add usrmerge QA check based on distro feature
ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
@@ -795,6 +795,23 @@ def package_qa_check_usrmerge(pkg, d, messages):
return False
return True
+QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
+def package_qa_check_perllocalpod(pkg, d, messages):
+ """
+    Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
+ installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
+ handle this for most recipes.
+ """
+ import glob
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
+
+ matches = glob.glob(podpath)
+ if matches:
+ matches = [package_qa_clean_path(path, d, pkg) for path in matches]
+ msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
+ package_qa_add_message(messages, "perllocalpod", msg)
+
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
"""
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 867b776aa7..522c46575d 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -71,23 +71,23 @@ do_deploy_append() {
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
- install -d ${DEPLOYDIR}
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ install -d $deployDir
+ install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- > ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
fi
fi
done
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index b51882dce4..ec18a3d699 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -53,6 +53,9 @@ UBOOT_MKIMAGE_DTCOPTS ??= ""
# fitImage Hash Algo
FIT_HASH_ALG ?= "sha256"
+# fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+
#
# Emit the fitImage ITS header
#
@@ -207,7 +210,6 @@ EOF
fitimage_emit_section_ramdisk() {
ramdisk_csum="${FIT_HASH_ALG}"
- ramdisk_ctype="none"
ramdisk_loadline=""
ramdisk_entryline=""
@@ -218,24 +220,6 @@ fitimage_emit_section_ramdisk() {
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
fi
- case $3 in
- *.gz)
- ramdisk_ctype="gzip"
- ;;
- *.bz2)
- ramdisk_ctype="bzip2"
- ;;
- *.lzma)
- ramdisk_ctype="lzma"
- ;;
- *.lzo)
- ramdisk_ctype="lzo"
- ;;
- *.lz4)
- ramdisk_ctype="lz4"
- ;;
- esac
-
cat << EOF >> ${1}
ramdisk@${2} {
description = "${INITRAMFS_IMAGE}";
@@ -243,7 +227,7 @@ fitimage_emit_section_ramdisk() {
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${ramdisk_ctype}";
+ compression = "none";
${ramdisk_loadline}
${ramdisk_entryline}
hash@1 {
@@ -265,6 +249,7 @@ EOF
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
+ conf_sign_algo="${FIT_SIGN_ALG}"
if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -346,7 +331,7 @@ EOF
cat << EOF >> ${1}
signature@1 {
- algo = "${conf_csum},rsa2048";
+ algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
${sign_line}
};
@@ -519,27 +504,27 @@ kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
fi
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
# UBOOT_DTB_IMAGE is a realfile, but we can't use
# ${UBOOT_DTB_IMAGE} since it contains ${PV} which is aimed
# for u-boot, but we are in kernel env now.
- install -m 0644 ${B}/u-boot-${MACHINE}*.dtb ${DEPLOYDIR}/
+ install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
fi
fi
}
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index a66e540884..de816bcec1 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -346,14 +346,13 @@ python package_do_split_gconvs () {
if use_bin == "compile":
makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
- m = open(makefile, "w")
- m.write("all: %s\n\n" % " ".join(commands.keys()))
- total = len(commands)
- for i, cmd in enumerate(commands):
- m.write(cmd + ":\n")
- m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
- m.write("\t" + commands[cmd] + "\n\n")
- m.close()
+ with open(makefile, "w") as m:
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ total = len(commands)
+ for i, (maketarget, makerecipe) in enumerate(commands.items()):
+ m.write(maketarget + ":\n")
+ m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
+ m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index adca881c85..648a4d7892 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -287,17 +287,7 @@ def incompatible_license_contains(license, truevalue, falsevalue, d):
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
return truevalue if license in bad_licenses else falsevalue
-def incompatible_license(d, dont_want_licenses, package=None):
- """
- This function checks if a recipe has only incompatible licenses. It also
- take into consideration 'or' operand. dont_want_licenses should be passed
- as canonical (SPDX) names.
- """
- import oe.license
- license = d.getVar("LICENSE_%s" % package) if package else None
- if not license:
- license = d.getVar('LICENSE')
-
+def incompatible_pkg_license(d, dont_want_licenses, license):
# Handles an "or" or two license sets provided by
# flattened_licenses(), pick one that works if possible.
def choose_lic_set(a, b):
@@ -311,6 +301,19 @@ def incompatible_license(d, dont_want_licenses, package=None):
return any(not oe.license.license_ok(canonical_license(d, l), \
dont_want_licenses) for l in licenses)
+def incompatible_license(d, dont_want_licenses, package=None):
+ """
+ This function checks if a recipe has only incompatible licenses. It also
+ take into consideration 'or' operand. dont_want_licenses should be passed
+ as canonical (SPDX) names.
+ """
+ import oe.license
+ license = d.getVar("LICENSE_%s" % package) if package else None
+ if not license:
+ license = d.getVar('LICENSE')
+
+ return incompatible_pkg_license(d, dont_want_licenses, license)
+
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
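
The refactor above extracts the expression check into incompatible_pkg_license(), which works on a LICENSE string handed to it, while incompatible_license() keeps looking the value up from the datastore and then delegates. Outside BitBake the idea reduces to: an OR-ed license expression is only incompatible if no alternative avoids the deny list. A deliberately simplified toy (oe.license handles full expressions with parentheses and AND, which this does not):

```python
def incompatible_pkg_license_toy(license_expr, dont_want):
    """Toy check: incompatible only if every OR alternative is denied."""
    alternatives = [alt.strip() for alt in license_expr.split("|")]
    return all(alt in dont_want for alt in alternatives)

print(incompatible_pkg_license_toy("GPL-3.0-only | MIT", {"GPL-3.0-only"}))  # False, the MIT alternative is acceptable
print(incompatible_pkg_license_toy("GPL-3.0-only", {"GPL-3.0-only"}))        # True
```
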
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 3f102d0fbc..b5399b6d96 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -43,10 +43,16 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses = [canonical_license(d, l) for l in bad_licenses]
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ whitelist = []
+ for lic in bad_licenses:
+ whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
+
with open(license_manifest, "w") as license_file:
for pkg in sorted(pkg_dic):
- if bad_licenses:
+ if bad_licenses and pkg not in whitelist:
try:
+ if incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"]):
+ bb.fatal("Package %s has an incompatible license %s and cannot be installed into the image." %(pkg, pkg_dic[pkg]["LICENSE"]))
(pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
bad_licenses, canonical_license, d)
@@ -56,6 +62,8 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+ if pkg in whitelist:
+ bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
# Rootfs manifest
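
The license_image hunk above adds an escape hatch: packages listed in a WHITELIST_<license> variable are let into the image with a warning, while anything else carrying an incompatible license now fails the build instead of being silently filtered. A sketch of the whitelist assembly and the resulting decision, with a dict standing in for the datastore and invented package data:

```python
# Fake datastore: variable name -> value, standing in for d.getVar().
fake_d = {"WHITELIST_GPL-3.0-only": "gdb bash"}

def build_whitelist(bad_licenses, getvar):
    whitelist = []
    for lic in bad_licenses:
        whitelist.extend((getvar("WHITELIST_" + lic) or "").split())
    return whitelist

bad = ["GPL-3.0-only"]
whitelist = build_whitelist(bad, fake_d.get)

for pkg, lic in [("gdb", "GPL-3.0-only"), ("foo", "GPL-3.0-only"), ("zlib", "Zlib")]:
    if lic in bad:
        if pkg in whitelist:
            print("WARN: including %s (%s) because it has been whitelisted" % (pkg, lic))
        else:
            print("FATAL: %s has incompatible license %s" % (pkg, lic))
    else:
        print("OK: %s (%s)" % (pkg, lic))
```
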
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
index 68105d9b84..74e7074a53 100644
--- a/meta/classes/live-vm-common.bbclass
+++ b/meta/classes/live-vm-common.bbclass
@@ -29,6 +29,39 @@ def pcbios(d):
PCBIOS = "${@pcbios(d)}"
PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
+# efi_populate_common DEST BOOTLOADER
+efi_populate_common() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+ # Build a EFI directory to create efi.img
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
+
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
+
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index 48ca112d80..efa6234078 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -55,6 +55,8 @@ def meson_cpu_family(var, d):
return 'mips64'
elif re.match(r"i[3-6]86", arch):
return "x86"
+ elif arch == "microblazeel" or arch == "microblazeeb":
+ return "microblaze"
else:
return arch
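
The meson.bbclass hunk above folds both MicroBlaze endianness variants into Meson's single "microblaze" cpu_family value. A reduced sketch of the mapping helper, reproducing only the cases visible in the hunk plus the default fall-through:

```python
import re

def meson_cpu_family(arch):
    """Reduced TARGET_ARCH -> Meson cpu_family mapping (partial)."""
    if re.match(r"i[3-6]86", arch):
        return "x86"
    elif arch in ("microblazeel", "microblazeeb"):
        return "microblaze"
    return arch

for a in ("i586", "microblazeel", "aarch64"):
    print(a, "->", meson_cpu_family(a))
```
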
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index d625bd527c..1a9295d36f 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -187,7 +187,7 @@ python do_package_qa_multilib() {
if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
(not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
(not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')) \
- and (not i.startswith("kernel-image")):
+ and (not i.startswith("kernel-image")) and (not i.startswith("/")):
candidates.append(i)
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index f25b0c31b1..03135acedc 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -100,6 +100,8 @@ python () {
clsextend.map_packagevars()
clsextend.map_variable("PROVIDES")
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
}
addhandler nativesdk_virtclass_handler
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index aa8451ffe8..f955df1111 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -826,8 +826,9 @@ python fixup_perms () {
# Now we actually load from the configuration files
for conf in get_fs_perms_list(d).split():
- if os.path.exists(conf):
- f = open(conf)
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
for line in f:
if line.startswith('#'):
continue
@@ -848,7 +849,6 @@ python fixup_perms () {
fs_perms_table[entry.path] = entry
if entry.path in fs_link_table:
fs_link_table.pop(entry.path)
- f.close()
# Debug -- list out in-memory table
#for dir in fs_perms_table:
@@ -1424,10 +1424,9 @@ fi
pkgdest = d.getVar('PKGDEST')
pkgdatadir = d.getVar('PKGDESTWORK')
- data_file = pkgdatadir + d.expand("/${PN}" )
- f = open(data_file, 'w')
- f.write("PACKAGES: %s\n" % packages)
- f.close()
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
@@ -1580,6 +1579,7 @@ SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
python package_do_shlibs() {
+ import itertools
import re, pipes
import subprocess
@@ -1777,21 +1777,20 @@ python package_do_shlibs() {
bb.note("Renaming %s to %s" % (old, new))
os.rename(old, new)
pkgfiles[pkg].remove(old)
-
+
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
- fd = open(shlibs_file, 'w')
- for s in sonames:
- if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
- (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
- if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
- fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (pkg, pkgver)
- fd.close()
+ with open(shlibs_file, 'w') as fd:
+ for s in sonames:
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
if needs_ldconfig and use_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg)
@@ -1835,16 +1834,16 @@ python package_do_shlibs() {
bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
if n[0] in shlib_provider.keys():
- shlib_provider_path = []
- for k in shlib_provider[n[0]].keys():
- shlib_provider_path.append(k)
- match = None
- for p in list(n[2]) + shlib_provider_path + libsearchpath:
- if p in shlib_provider[n[0]]:
- match = p
- break
- if match:
- (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
@@ -1863,11 +1862,10 @@ python package_do_shlibs() {
deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
if os.path.exists(deps_file):
os.remove(deps_file)
- if len(deps):
- fd = open(deps_file, 'w')
- for dep in sorted(deps):
- fd.write(dep + '\n')
- fd.close()
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
}
python package_do_pkgconfig () {
@@ -1897,9 +1895,8 @@ python package_do_pkgconfig () {
pkgconfig_provided[pkg].append(name)
if not os.access(file, os.R_OK):
continue
- f = open(file, 'r')
- lines = f.readlines()
- f.close()
+ with open(file, 'r') as f:
+ lines = f.readlines()
for l in lines:
m = var_re.match(l)
if m:
@@ -1917,10 +1914,9 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
- f = open(pkgs_file, 'w')
- for p in pkgconfig_provided[pkg]:
- f.write('%s\n' % p)
- f.close()
+ with open(pkgs_file, 'w') as f:
+ for p in pkgconfig_provided[pkg]:
+ f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
@@ -1930,9 +1926,8 @@ python package_do_pkgconfig () {
m = re.match(r'^(.*)\.pclist$', file)
if m:
pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
pkgconfig_provided[pkg] = []
for l in lines:
pkgconfig_provided[pkg].append(l.rstrip())
@@ -1950,10 +1945,9 @@ python package_do_pkgconfig () {
bb.note("couldn't find pkgconfig module '%s' in any package" % n)
deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
}
def read_libdep_files(d):
@@ -1964,9 +1958,8 @@ def read_libdep_files(d):
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
depsfile = d.expand("${PKGDEST}/" + pkg + extension)
if os.access(depsfile, os.R_OK):
- fd = open(depsfile)
- lines = fd.readlines()
- fd.close()
+ with open(depsfile) as fd:
+ lines = fd.readlines()
for l in lines:
l.rstrip()
deps = bb.utils.explode_dep_versions2(l)
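
Besides converting the remaining open()/close() pairs to with-blocks, the package_do_shlibs() rework above gathers every candidate provider of a needed soname (from the binary's own search paths, the provider map and the library search path) and treats more than one match as a hard error rather than silently taking the first hit. A reduced sketch of that resolution step with an invented provider map:

```python
import itertools

# soname -> {library path: (providing package, version)}
shlib_provider = {
    "libfoo.so.1": {"/usr/lib": ("libfoo", "1.0")},
    "libbar.so.2": {"/usr/lib": ("libbar", "2.0"),
                    "/opt/vendor/lib": ("libbar-vendor", "2.0")},
}

def resolve_provider(soname, rpaths, libsearchpath):
    provider_map = shlib_provider.get(soname, {})
    matches = set()
    for p in itertools.chain(rpaths, sorted(provider_map.keys()), libsearchpath):
        if p in provider_map:
            matches.add(p)
    if len(matches) > 1:
        raise RuntimeError("Multiple shlib providers for %s: %s" %
                           (soname, ", ".join(provider_map[m][0] for m in sorted(matches))))
    elif matches:
        return provider_map[matches.pop()]
    return None

print(resolve_provider("libfoo.so.1", [], ["/usr/lib"]))   # ('libfoo', '1.0')
try:
    resolve_provider("libbar.so.2", [], ["/usr/lib"])
except RuntimeError as e:
    print(e)
```
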
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index a605a57ecc..9145717f98 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -409,7 +409,6 @@ python write_specfile () {
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
- bb.note("Creating RPM package for %s" % splitname)
spec_files_top.append('%files')
if extra_pkgdata:
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
@@ -418,7 +417,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_top.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_top.append('')
continue
@@ -510,7 +509,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_bottom.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_bottom.append('')
del localdata
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 94a59e0c03..1541c8fbff 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -8,7 +8,7 @@ PACKAGES = "${PN}"
# By default, packagegroup packages do not depend on a certain architecture.
# Only if dependencies are modified by MACHINE_FEATURES, packages
-# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass
+# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass
PACKAGE_ARCH ?= "all"
# Fully expanded - so it applies the overrides as well
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 59920a57a7..d03465b6fc 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -8,6 +8,7 @@ COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
def complementary_globs(featurevar, d):
all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
@@ -63,7 +64,7 @@ python () {
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
-SDK_DEPENDS_append_libc-glibc = " nativesdk-glibc-locale"
+SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 800e1175d7..9fda1c9e78 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -20,6 +20,7 @@ SDK_EXT_task-populate-sdk-ext = "-ext"
SDK_EXT_TYPE ?= "full"
SDK_INCLUDE_PKGDATA ?= "0"
SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
+SDK_INCLUDE_NATIVESDK ?= "0"
SDK_RECRDEP_TASKS ?= ""
@@ -378,6 +379,11 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
# Write a templateconf.cfg
with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
f.write('meta/conf\n')
@@ -401,9 +407,27 @@ python copy_buildsystem () {
excluded_targets = get_sdk_install_targets(d, images_only=True)
sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+ #nativesdk-only sigfile to merge into locked-sigs.inc
+ sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
+ nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
+
+ if sdk_include_nativesdk:
+ oe.copy_buildsystem.prune_lockedsigs([],
+ excluded_targets.split(),
+ nativesigfile,
+ True,
+ nativesigfile_pruned)
+
+ oe.copy_buildsystem.merge_lockedsigs([],
+ sigfile,
+ nativesigfile_pruned,
+ sigfile)
+
oe.copy_buildsystem.prune_lockedsigs([],
excluded_targets.split(),
sigfile,
+ False,
lockedsigs_pruned)
sstate_out = baseoutpath + '/sstate-cache'
@@ -414,13 +438,18 @@ python copy_buildsystem () {
sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
sdk_ext_type = d.getVar('SDK_EXT_TYPE')
- if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
+ if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
# Create the filtered task list used to generate the sstate cache shipped with the SDK
tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
else:
tasklistfn = None
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
# Add packagedata if enabled
if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
@@ -657,9 +686,16 @@ fakeroot python do_populate_sdk_ext() {
d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
# ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
d.delVar('SDKIMAGE_LINGUAS')
+ if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
+ generate_nativesdk_lockedsigs(d)
populate_sdk_common(d)
}
+def generate_nativesdk_lockedsigs(d):
+ import oe.copy_buildsystem
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
def get_ext_sdk_depends(d):
# Note: the deps varflag is a list not a string, so we need to specify expand=False
deps = d.getVarFlag('do_image_complete', 'deps', False)
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 1c55abfbf3..1a12db1206 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -25,6 +25,19 @@ def errorreport_savedata(e, newdata, file):
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
+def get_conf_data(e, filename):
+ builddir = e.data.getVar('TOPDIR')
+ filepath = os.path.join(builddir, "conf", filename)
+ jsonstring = ""
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as f:
+ for line in f.readlines():
+ if line.startswith("#") or len(line.strip()) == 0:
+ continue
+ else:
+ jsonstring=jsonstring + line
+ return jsonstring
+
python errorreport_handler () {
import json
import codecs
@@ -51,6 +64,8 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['local_conf'] = get_conf_data(e, 'local.conf')
+ data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -63,19 +78,15 @@ python errorreport_handler () {
taskdata['task'] = task
if log:
try:
- logFile = codecs.open(log, 'r', 'utf-8')
- logdata = logFile.read()
-
+ with codecs.open(log, encoding='utf-8') as logFile:
+ logdata = logFile.read()
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
-
- logFile.close()
except:
logdata = "Unable to read log file"
-
else:
logdata = "No Log"
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
index 8788ad7145..39b6e40cac 100644
--- a/meta/classes/reproducible_build.bbclass
+++ b/meta/classes/reproducible_build.bbclass
@@ -39,19 +39,28 @@ inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'repr
SDE_DIR ="${WORKDIR}/source-date-epoch"
SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
+SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
SSTATETASKS += "do_deploy_source_date_epoch"
do_deploy_source_date_epoch () {
echo "Deploying SDE to ${SDE_DIR}."
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ fi
}
python do_deploy_source_date_epoch_setscene () {
sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ os.rename(sde_file, d.getVar('SDE_FILE'))
}
-do_deploy_source_date_epoch[dirs] = "${SDE_DIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DIR}"
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index a6bd3f719f..0bbc450100 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -64,6 +64,15 @@ do_rm_work () {
mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
i=dummy
;;
+ *do_image_qa_setscene*)
+ # Ensure we don't 'stack' setscene extensions to this stamp with the section below
+ i=dummy
+ ;;
+ *do_image_qa*)
+ # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
+ i=dummy
+ ;;
*do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
i=dummy
;;
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index fc338161c4..2f171836fa 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -29,7 +29,7 @@ APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro
ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
@@ -361,7 +361,9 @@ rootfs_reproducible () {
echo $sformatted > ${IMAGE_ROOTFS}/etc/version
bbnote "rootfs_reproducible: set /etc/version to $sformatted"
- find ${IMAGE_ROOTFS}/etc/gconf -name '%gconf.xml' -print0 | xargs -0r \
- sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
+ find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
+ sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ fi
fi
}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 2d3f49eb1a..a14bf53883 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -622,13 +622,14 @@ def check_sanity_version_change(status, d):
# In other words, these tests run once in a given build directory and then
 # never again until the sanity version or host distribution id/version changes.
- # Check the python install is complete. glib-2.0-natives requries
- # xml.parsers.expat
+ # Check the python install is complete. Examples that are often removed in
+    # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
+ # requires distutils.sysconfig.
try:
import xml.parsers.expat
- except ImportError:
- status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
- import stat
+ import distutils.sysconfig
+ except ImportError as e:
+ status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
status.addresult(check_make_version(d))
status.addresult(check_patch_version(d))
@@ -664,6 +665,7 @@ def check_sanity_version_change(status, d):
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ import stat
tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
@@ -798,6 +800,11 @@ def check_sanity_everybuild(status, d):
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
+ # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
+ sdkvendor = d.getVar("SDK_VENDOR")
+ if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
+ status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash\n")
+
check_supported_distro(d)
omask = os.umask(0o022)
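
The new SDK_VENDOR test above rejects values such as "-my-sdk" early, since a second dash corrupts the generated toolchain triplets. The accepted form is a single leading dash, as the sketch below shows with a few invented values:

```python
def sdk_vendor_ok(sdkvendor):
    """SDK_VENDOR must look like '-foosdk': a single, leading dash."""
    return sdkvendor.startswith("-") and sdkvendor.count("-") == 1

for v in ("-poky", "-my-sdk", "oesdk"):
    print(v, sdk_vendor_ok(v))   # True, False, False
```
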
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 11222223a9..64808f8e10 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -89,11 +89,6 @@ SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
the output hash for a task, which in turn is used to determine equivalency. \
"
-SSTATE_HASHEQUIV_SERVER ?= ""
-SSTATE_HASHEQUIV_SERVER[doc] = "The hash equivalence sever. For example, \
- 'http://192.168.0.1:5000'. Do not include a trailing slash \
- "
-
SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
hash equivalency server, such as PN, PV, taskname, etc. This information \
@@ -823,7 +818,7 @@ sstate_unpack_package () {
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, **kwargs):
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
found = set()
missed = set()
extension = ".tgz"
@@ -956,16 +951,17 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, **kwargs):
evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
- # Print some summary statistics about the current task completion and how much sstate
- # reuse there was. Avoid divide by zero errors.
- total = len(sq_data['hash'])
- complete = 0
- if currentcount:
- complete = (len(found) + currentcount) / (total + currentcount) * 100
- match = 0
- if total:
- match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
+ if summary:
+ # Print some summary statistics about the current task completion and how much sstate
+ # reuse there was. Avoid divide by zero errors.
+ total = len(sq_data['hash'])
+ complete = 0
+ if currentcount:
+ complete = (len(found) + currentcount) / (total + currentcount) * 100
+ match = 0
+ if total:
+ match = len(found) / total * 100
+ bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
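
sstate_checkhashes() above gains a summary keyword so callers can suppress the "Sstate summary" line when it would just be noise; the statistics themselves are untouched. A sketch of that calculation with the same divide-by-zero guards and made-up numbers:

```python
def sstate_summary(total, found, missed, currentcount):
    """Build the summary line printed when summary=True."""
    complete = 0
    if currentcount:
        complete = (found + currentcount) / (total + currentcount) * 100
    match = 0
    if total:
        match = found / total * 100
    return ("Sstate summary: Wanted %d Found %d Missed %d Current %d "
            "(%d%% match, %d%% complete)" %
            (total, found, missed, currentcount, match, complete))

print(sstate_summary(total=120, found=90, missed=30, currentcount=40))
```
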
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 55a9b52ed2..cca0b7e0d6 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -449,6 +449,7 @@ python extend_recipe_sysroot() {
msg_exists = []
msg_adding = []
+ # Handle all removals first since files may move between recipes
for dep in configuredeps:
c = setscenedeps[dep][0]
if c not in installed:
@@ -459,7 +460,6 @@ python extend_recipe_sysroot() {
if os.path.exists(depdir + "/" + c):
lnk = os.readlink(depdir + "/" + c)
if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- msg_exists.append(c)
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
@@ -470,6 +470,20 @@ python extend_recipe_sysroot() {
elif os.path.lexists(depdir + "/" + c):
os.unlink(depdir + "/" + c)
+ # Now handle installs
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ msg_exists.append(c)
+ continue
+
msg_adding.append(c)
os.symlink(c + "." + taskhash, depdir + "/" + c)
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
index 3cd6811a6c..336c4c2ff5 100644
--- a/meta/classes/systemd-boot.bbclass
+++ b/meta/classes/systemd-boot.bbclass
@@ -11,50 +11,25 @@
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-EFIDIR = "/EFI/BOOT"
+require conf/image-uefi.conf
# Need UUID utility code.
inherit fs-uuid
efi_populate() {
- DEST=$1
+ efi_populate_common "$1" systemd
- EFI_IMAGE="systemd-bootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="systemd-bootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
# systemd-boot requires these paths for configuration files
# they are not customizable so no point in new vars
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
for i in ${SYSTEMD_BOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
done
}
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+efi_iso_populate_append() {
cp -r $iso_dir/loader ${EFIIMGDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
}
inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index 747055b8fa..9e8a82c9f1 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -32,11 +32,7 @@ if type systemctl >/dev/null 2>/dev/null; then
if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
for service in ${SYSTEMD_SERVICE_ESCAPED}; do
- case "${service}" in
- *@*)
- systemctl ${OPTS} enable "${service}"
- ;;
- esac
+ systemctl ${OPTS} enable "$service"
done
fi
@@ -231,5 +227,6 @@ python rm_sysvinit_initddir (){
do_install[postfuncs] += "${RMINITDIR} "
RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
RMINITDIR = ""
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index 525c5a6173..844ed87944 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -262,6 +262,24 @@ def testimage_main(d):
    # It would be better to find these modules using introspection.
target_kwargs['target_modules_path'] = d.getVar('BBPATH')
+ # hardware controlled targets might need further access
+ target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
+ target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
+ target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+
+ def export_ssh_agent(d):
+ import os
+
+ variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
+ for v in variables:
+ if v not in os.environ.keys():
+ val = d.getVar(v)
+ if val is not None:
+ os.environ[v] = val
+
+ export_ssh_agent(d)
+
# runtime use network for download projects for build
export_proxies(d)
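
export_ssh_agent(), added above, copies SSH_AGENT_PID and SSH_AUTH_SOCK from the datastore into os.environ when they are not already set, so tests against hardware targets can reuse the caller's ssh-agent. A standalone sketch with a dict standing in for the datastore:

```python
import os

def export_ssh_agent(getvar, variables=("SSH_AGENT_PID", "SSH_AUTH_SOCK")):
    """Copy ssh-agent related variables into the environment if unset."""
    for v in variables:
        if v not in os.environ:
            val = getvar(v)
            if val is not None:
                os.environ[v] = val

# Illustration only; the socket path is invented.
export_ssh_agent({"SSH_AUTH_SOCK": "/tmp/ssh-XXXX/agent.1234"}.get)
print(os.environ.get("SSH_AUTH_SOCK"))
```
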
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
deleted file mode 100644
index 00f453cec1..0000000000
--- a/meta/classes/tinderclient.bbclass
+++ /dev/null
@@ -1,368 +0,0 @@
-def tinder_http_post(server, selector, content_type, body):
- import httplib
- # now post it
- for i in range(0,5):
- try:
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- #print(errcode, errmsg, headers)
- return (errcode,errmsg, headers, h.file)
- except:
- print("Error sending the report!")
- # try again
- pass
-
- # return some garbage
- return (-1, "unknown", "unknown", None)
-
-def tinder_form_data(bound, dict, log):
- output = []
- # for each key in the dictionary
- for name in dict:
- assert dict[name]
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="%s"' % name )
- output.append( "" )
- output.append( dict[name] )
- if log:
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
- output.append( '' )
- output.append( log )
- output.append( '--' + bound + '--' )
- output.append( '' )
-
- return "\r\n".join(output)
-
-def tinder_time_string():
- """
- Return the time as GMT
- """
- return ""
-
-def tinder_format_http_post(d,status,log):
- """
- Format the Tinderbox HTTP post with the data needed
- for the tinderbox to be happy.
- """
-
- import random
-
- # the variables we will need to send on this form post
- variables = {
- "tree" : d.getVar('TINDER_TREE'),
- "machine_name" : d.getVar('TINDER_MACHINE'),
- "os" : os.uname()[0],
- "os_version" : os.uname()[2],
- "compiler" : "gcc",
- "clobber" : d.getVar('TINDER_CLOBBER') or "0",
- "srcdate" : d.getVar('SRCDATE'),
- "PN" : d.getVar('PN'),
- "PV" : d.getVar('PV'),
- "PR" : d.getVar('PR'),
- "FILE" : d.getVar('FILE') or "N/A",
- "TARGETARCH" : d.getVar('TARGET_ARCH'),
- "TARGETFPU" : d.getVar('TARGET_FPU') or "Unknown",
- "TARGETOS" : d.getVar('TARGET_OS') or "Unknown",
- "MACHINE" : d.getVar('MACHINE') or "Unknown",
- "DISTRO" : d.getVar('DISTRO') or "Unknown",
- "zecke-rocks" : "sure",
- }
-
- # optionally add the status
- if status:
- variables["status"] = str(status)
-
- # try to load the machine id
- # we only need on build_status.pl but sending it
- # always does not hurt
- try:
- f = open(d.getVar('TMPDIR')+'/tinder-machine.id', 'r')
- id = f.read()
- variables['machine_id'] = id
- except:
- pass
-
- # the boundary we will need
- boundary = "----------------------------------%d" % int(random.random()*1000000000000)
-
- # now format the body
- body = tinder_form_data( boundary, variables, log )
-
- return ("multipart/form-data; boundary=%s" % boundary),body
-
-
-def tinder_build_start(d):
- """
- Inform the tinderbox that a build is starting. We do this
- by posting our name and tree to the build_start.pl script
- on the server.
- """
-
- # get the body and type
- content_type, body = tinder_format_http_post(d,None,None)
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_start.pl"
-
- #print("selector %s and url %s" % (selector, url))
-
- # now post it
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- report = h_file.read()
-
- # now let us find the machine id that was assigned to us
- search = "<machine id='"
- report = report[report.find(search)+len(search):]
- report = report[0:report.find("'")]
-
- bb.note("Machine ID assigned by tinderbox: %s" % report )
-
- # now we will need to save the machine number
- # we will override any previous numbers
- f = open(d.getVar('TMPDIR')+"/tinder-machine.id", 'w')
- f.write(report)
-
-
-def tinder_send_http(d, status, _log):
- """
- Send this log as build status
- """
-
- # get the body and type
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_status.pl"
-
- # now post it - in chunks of 10.000 characters
- new_log = _log
- while len(new_log) > 0:
- content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- #print(h.file.read())
- new_log = new_log[18000:]
-
-
-def tinder_print_info(d):
- """
- Print the TinderBox Info
- Including informations of the BaseSystem and the Tree
- we use.
- """
-
- # get the local vars
- time = tinder_time_string()
- ops = os.uname()[0]
- version = os.uname()[2]
- url = d.getVar('TINDER_URL')
- tree = d.getVar('TINDER_TREE')
- branch = d.getVar('TINDER_BRANCH')
- srcdate = d.getVar('SRCDATE')
- machine = d.getVar('MACHINE')
- distro = d.getVar('DISTRO')
- bbfiles = d.getVar('BBFILES')
- tarch = d.getVar('TARGET_ARCH')
- fpu = d.getVar('TARGET_FPU')
- oerev = d.getVar('OE_REVISION') or "unknown"
-
- # there is a bug with tipple quoted strings
- # i will work around but will fix the original
- # bug as well
- output = []
- output.append("== Tinderbox Info" )
- output.append("Time: %(time)s" )
- output.append("OS: %(ops)s" )
- output.append("%(version)s" )
- output.append("Compiler: gcc" )
- output.append("Tinderbox Client: 0.1" )
- output.append("Tinderbox Client Last Modified: yesterday" )
- output.append("Tinderbox Protocol: 0.1" )
- output.append("URL: %(url)s" )
- output.append("Tree: %(tree)s" )
- output.append("Config:" )
- output.append("branch = '%(branch)s'" )
- output.append("TARGET_ARCH = '%(tarch)s'" )
- output.append("TARGET_FPU = '%(fpu)s'" )
- output.append("SRCDATE = '%(srcdate)s'" )
- output.append("MACHINE = '%(machine)s'" )
- output.append("DISTRO = '%(distro)s'" )
- output.append("BBFILES = '%(bbfiles)s'" )
- output.append("OEREV = '%(oerev)s'" )
- output.append("== End Tinderbox Client Info" )
-
- # now create the real output
- return "\n".join(output) % vars()
-
-
-def tinder_print_env():
- """
- Print the environment variables of this build
- """
- time_start = tinder_time_string()
- time_end = tinder_time_string()
-
- # build the environment
- env = ""
- for var in os.environ:
- env += "%s=%s\n" % (var, os.environ[var])
-
- output = []
- output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
- output.append( env )
- output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
-
- return "\n".join(output) % vars()
-
-def tinder_tinder_start(d, event):
- """
- PRINT the configuration of this build
- """
-
- time_start = tinder_time_string()
- config = tinder_print_info(d)
- #env = tinder_print_env()
- time_end = tinder_time_string()
- packages = " ".join( event.getPkgs() )
-
- output = []
- output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
- output.append( config )
- #output.append( env )
- output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
- output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
- output.append( "<--- TINDERBOX STARTING BUILD NOW" )
-
- output.append( "" )
-
- return "\n".join(output) % vars()
-
-def tinder_do_tinder_report(event):
- """
- Report to the tinderbox:
- On the BuildStart we will inform the box directly
- On the other events we will write to the TINDER_LOG and
- when the Task is finished we will send the report.
-
- The above is not yet fully implemented. Currently we send
- information immediately. The caching/queuing needs to be
- implemented. Also sending more or less information is not
- implemented yet.
-
- We have two temporary files stored in the TMP directory. One file
- contains the assigned machine id for the tinderclient. This id gets
- assigned when we connect the box and start the build process the second
- file is used to workaround an EventHandler limitation. If BitBake is ran
- with the continue option we want the Build to fail even if we get the
- BuildCompleted Event. In this case we have to look up the status and
- send it instead of 100/success.
- """
- import glob
-
- # variables
- name = bb.event.getName(event)
- log = ""
- status = 1
- # Check what we need to do Build* shows we start or are done
- if name == "BuildStarted":
- tinder_build_start(event.data)
- log = tinder_tinder_start(event.data,event)
-
- try:
- # truncate the tinder log file
- f = open(event.data.getVar('TINDER_LOG'), 'w')
- f.write("")
- f.close()
- except:
- pass
-
- try:
- # write a status to the file. This is needed for the -k option
- # of BitBake
- g = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- g.write("")
- g.close()
- except IOError:
- pass
-
- # Append the Task-Log (compile,configure...) to the log file
- # we will send to the server
- if name == "TaskSucceeded" or name == "TaskFailed":
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T'), event.task))
-
- if len(log_file) != 0:
- to_file = event.data.getVar('TINDER_LOG')
- log += "".join(open(log_file[0], 'r').readlines())
-
- # set the right 'HEADER'/Summary for the TinderBox
- if name == "TaskStarted":
- log += "---> TINDERBOX Task %s started\n" % event.task
- elif name == "TaskSucceeded":
- log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
- elif name == "TaskFailed":
- log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
- elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF')
- elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF')
- elif name == "PkgFailed":
- if not event.data.getVar('TINDER_AUTOBUILD') == "0":
- build.exec_task('do_clean', event.data)
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF')
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
- elif name == "BuildCompleted":
- log += "Build Completed\n"
- status = 100
- # Check if we have a old status...
- try:
- h = open(event.data.getVar('TMPDIR')+'/tinder-status', 'r')
- status = int(h.read())
- except:
- pass
-
- elif name == "MultipleProviders":
- log += "---> TINDERBOX Multiple Providers\n"
- log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
- log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
- log += "is runtime: %d\n" % event.isRuntime()
- log += "<--- TINDERBOX Multiple Providers\n"
- elif name == "NoProvider":
- log += "Error: No Provider for: %s\n" % event.getItem()
- log += "Error:Was Runtime: %d\n" % event.isRuntime()
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
-
- # now post the log
- if len(log) == 0:
- return
-
- # for now we will use the http post method as it is the only one
- log_post_method = tinder_send_http
- log_post_method(event.data, status, log)
-
-
-# we want to be an event handler
-addhandler tinderclient_eventhandler
-python tinderclient_eventhandler() {
- if e.data is None or bb.event.getName(e) == "MsgNote":
- return
-
- do_tinder_report = e.data.getVar('TINDER_REPORT')
- if do_tinder_report and do_tinder_report == "1":
- tinder_do_tinder_report(e)
-
- return
-}
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 1a2ec4f3b2..db1d3215ef 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -90,6 +90,7 @@ toolchain_shared_env_script () {
echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
+ echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
echo 'export AR=${TARGET_PREFIX}ar' >> $script
echo 'export NM=${TARGET_PREFIX}nm' >> $script
echo 'export M4=m4' >> $script
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 3326c0db3d..9f8645a36a 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -45,7 +45,7 @@ python uninative_event_fetchloader() {
tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
tarballpath = os.path.join(tarballdir, tarball)
- if not os.path.exists(tarballpath):
+ if not os.path.exists(tarballpath + ".done"):
bb.utils.mkdirhier(tarballdir)
if d.getVar("UNINATIVE_URL") == "unset":
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index a0a7f4911e..263d8aea4f 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -872,7 +872,7 @@ BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH BBSERVER DL_DI
CCACHE_DIR EXTERNAL_TOOLCHAIN CCACHE CCACHE_NOHASHDIR LICENSE_PATH SDKPKGSUFFIX \
WARN_QA ERROR_QA WORKDIR STAMPCLEAN PKGDATA_DIR BUILD_ARCH SSTATE_PKGARCH \
BB_WORKERCONTEXT BB_LIMITEDDEPS BB_UNIHASH extend_recipe_sysroot DEPLOY_DIR \
- SSTATE_HASHEQUIV_METHOD SSTATE_HASHEQUIV_SERVER SSTATE_HASHEQUIV_REPORT_TASKDATA \
+ SSTATE_HASHEQUIV_METHOD SSTATE_HASHEQUIV_REPORT_TASKDATA \
SSTATE_HASHEQUIV_OWNER CCACHE_TOP_DIR BB_HASHSERVE"
BB_HASHCONFIG_WHITELIST ?= "${BB_HASHBASE_WHITELIST} DATE TIME SSH_AGENT_PID \
SSH_AUTH_SOCK PSEUDO_BUILD BB_ENV_EXTRAWHITE DISABLE_SANITY_CHECKS \
diff --git a/meta/conf/distro/include/maintainers.inc b/meta/conf/distro/include/maintainers.inc
index 5f21f98b2b..d85e5b697f 100644
--- a/meta/conf/distro/include/maintainers.inc
+++ b/meta/conf/distro/include/maintainers.inc
@@ -355,7 +355,7 @@ RECIPE_MAINTAINER_pn-libmatchbox = "Ross Burton <ross.burton@intel.com>"
RECIPE_MAINTAINER_pn-libmnl = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-libmpc = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-libmodule-build-perl = "Tim Orling <timothy.t.orling@linux.intel.com>"
-RECIPE_MAINTAINER_pn-libmodulemd = "Alexander Kanavin <alex.kanavin@gmail.com>"
+RECIPE_MAINTAINER_pn-libmodulemd-v1 = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-libnewt = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-libnewt-python = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-libnl = "Alexander Kanavin <alex.kanavin@gmail.com>"
@@ -718,7 +718,6 @@ RECIPE_MAINTAINER_pn-wayland = "Denys Dmytriyenko <denys@ti.com>"
RECIPE_MAINTAINER_pn-wayland-protocols = "Denys Dmytriyenko <denys@ti.com>"
RECIPE_MAINTAINER_pn-webkitgtk = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-weston = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-weston-conf = "Denys Dmytriyenko <denys@ti.com>"
RECIPE_MAINTAINER_pn-weston-init = "Denys Dmytriyenko <denys@ti.com>"
RECIPE_MAINTAINER_pn-wget = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER_pn-which = "Anuj Mittal <anuj.mittal@intel.com>"
diff --git a/meta/conf/distro/include/ptest-packagelists.inc b/meta/conf/distro/include/ptest-packagelists.inc
index 0dc085091f..bb4724cf52 100644
--- a/meta/conf/distro/include/ptest-packagelists.inc
+++ b/meta/conf/distro/include/ptest-packagelists.inc
@@ -77,3 +77,5 @@ PTESTS_SLOW = "\
util-linux-ptest \
valgrind-ptest \
"
+
+PTESTS_SLOW_remove_riscv64 = "valgrind-ptest"
diff --git a/meta/conf/distro/include/tcmode-default.inc b/meta/conf/distro/include/tcmode-default.inc
index d8f434609b..4a23c50631 100644
--- a/meta/conf/distro/include/tcmode-default.inc
+++ b/meta/conf/distro/include/tcmode-default.inc
@@ -26,7 +26,7 @@ QEMUVERSION ?= "4.1%"
GOVERSION ?= "1.12%"
# This can not use wildcards like 8.0.% since it is also used in mesa to denote
# llvm version being used, so always bump it with llvm recipe version bump
-LLVMVERSION ?= "8.0.1"
+LLVMVERSION ?= "9.0.0"
PREFERRED_VERSION_gcc ?= "${GCCVERSION}"
PREFERRED_VERSION_gcc-cross-${TARGET_ARCH} ?= "${GCCVERSION}"
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index df24346855..ad75d3e2a3 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,9 +6,9 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.29"
+UNINATIVE_MAXGLIBCVERSION = "2.30"
-UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.6/"
-UNINATIVE_CHECKSUM[aarch64] ?= "a37118fc8b423f48146120707b81dd15017512c3e8ef9e6ca2cb3a033f4f4046"
-UNINATIVE_CHECKSUM[i686] ?= "3234fc3ded810225071f23a0e9a99f4f8c2480059945a848eff076ce78122ade"
-UNINATIVE_CHECKSUM[x86_64] ?= "133387753a9acf3e1b788103c59fac91e968e2ee331d7a4b9498e926ada7be57"
+UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.7/"
+UNINATIVE_CHECKSUM[aarch64] ?= "e76a45886ee8a0b3904b761c17ac8ff91edf9811ee455f1832d10763ba794dfc"
+UNINATIVE_CHECKSUM[i686] ?= "810d027dfb1c7675226afbcec07808770516c969ee7378f6d8240281083f8924"
+UNINATIVE_CHECKSUM[x86_64] ?= "9498d8bba047499999a7310ac2576d0796461184965351a56f6d32c888a1f216"
diff --git a/meta/conf/image-uefi.conf b/meta/conf/image-uefi.conf
new file mode 100644
index 0000000000..aaeff12ccb
--- /dev/null
+++ b/meta/conf/image-uefi.conf
@@ -0,0 +1,16 @@
+# Location of EFI files inside EFI System Partition
+EFIDIR ?= "/EFI/BOOT"
+
+# Prefix where the ESP is mounted inside the rootfs. Set to empty if the package
+# is going to be installed to the ESP directly.
+EFI_PREFIX ?= "/boot"
+
+# Location inside rootfs.
+EFI_FILES_PATH = "${EFI_PREFIX}${EFIDIR}"
+
+# Determine name of bootloader image
+EFI_BOOT_IMAGE ?= "bootINVALID.efi"
+EFI_BOOT_IMAGE_x86-64 = "bootx64.efi"
+EFI_BOOT_IMAGE_x86 = "bootia32.efi"
+EFI_BOOT_IMAGE_aarch64 = "bootaa64.efi"
+EFI_BOOT_IMAGE_arm = "bootarm.efi"
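The new image-uefi.conf above centralises the EFI install layout in a few variables. As a rough illustration only — plain Python approximating BitBake's per-arch overrides, not the real variable engine — the values resolve like this:

# Illustrative sketch (not BitBake): how the per-arch overrides in
# image-uefi.conf resolve to a final boot-image path inside the rootfs.
EFI_BOOT_IMAGE_BY_ARCH = {          # mirrors the overrides in the conf file
    "x86-64":  "bootx64.efi",
    "x86":     "bootia32.efi",
    "aarch64": "bootaa64.efi",
    "arm":     "bootarm.efi",
}

def efi_files_path(target_arch, efi_prefix="/boot", efidir="/EFI/BOOT"):
    """Return the rootfs path of the boot image for target_arch."""
    image = EFI_BOOT_IMAGE_BY_ARCH.get(target_arch, "bootINVALID.efi")
    return "%s%s/%s" % (efi_prefix, efidir, image)

print(efi_files_path("x86-64"))     # /boot/EFI/BOOT/bootx64.efi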
diff --git a/meta/conf/layer.conf b/meta/conf/layer.conf
index 5ecb93651e..fcdf9ae5a3 100644
--- a/meta/conf/layer.conf
+++ b/meta/conf/layer.conf
@@ -7,12 +7,12 @@ BBFILE_COLLECTIONS += "core"
BBFILE_PATTERN_core = "^${LAYERDIR}/"
BBFILE_PRIORITY_core = "5"
-LAYERSERIES_CORENAMES = "warrior"
+LAYERSERIES_CORENAMES = "zeus"
# This should only be incremented on significant changes that will
# cause compatibility issues with other layers
LAYERVERSION_core = "11"
-LAYERSERIES_COMPAT_core = "warrior"
+LAYERSERIES_COMPAT_core = "warrior zeus"
BBLAYERS_LAYERINDEX_NAME_core = "openembedded-core"
@@ -73,7 +73,7 @@ SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += " \
font-alias->font-util \
systemd-boot->systemd-bootconf \
systemd->systemd-conf \
- weston->weston-conf \
+ weston->weston-init \
weston-init->weston \
weston-init->kbd \
connman->xl2tpd \
diff --git a/meta/conf/local.conf.sample b/meta/conf/local.conf.sample
index 92afac2fe1..783d2c4c16 100644
--- a/meta/conf/local.conf.sample
+++ b/meta/conf/local.conf.sample
@@ -212,6 +212,20 @@ PACKAGECONFIG_append_pn-qemu-system-native = " sdl"
PACKAGECONFIG_append_pn-nativesdk-qemu = " sdl"
#ASSUME_PROVIDED += "libsdl2-native"
+#
+# Hash Equivalence
+#
+# Enable support for automatically running a local hash equivalence server and
+# instruct bitbake to use a hash equivalence aware signature generator. Hash
+# equivalence improves reuse of sstate by detecting when a given sstate
+# artifact can be reused as equivalent, even if the current task hash doesn't
+# match the one that generated the artifact.
+#
+# A shared hash equivalence server can be set with the "<HOSTNAME>:<PORT>" format
+#
+#BB_HASHSERVE = "auto"
+#BB_SIGNATURE_HANDLER = "OEEquivHash"
+
# CONF_VERSION is increased each time build/conf/ changes incompatibly and is used to
# track the version of this file when it was generated. This can safely be ignored if
# this doesn't mean anything to you.
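The hash equivalence comment block added to local.conf.sample above is purely a configuration how-to; the idea behind it can be sketched with toy Python (this is not bitbake or hashserv code, just an illustration of why equivalent task hashes allow sstate reuse):

# Toy model of hash equivalence: tasks whose outputs are identical are assigned
# one shared "unihash", so their sstate artifacts become interchangeable.
class ToyHashEquivServer:
    def __init__(self):
        self.unihash_by_output = {}          # output hash -> unihash

    def report(self, taskhash, outhash):
        # the first task to produce this output defines the unihash;
        # later tasks with different taskhashes but the same output map to it
        return self.unihash_by_output.setdefault(outhash, taskhash)

server = ToyHashEquivServer()
print(server.report("taskhash-a", "out-1"))  # taskhash-a
print(server.report("taskhash-b", "out-1"))  # taskhash-a -> sstate can be reused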
diff --git a/meta/conf/machine/include/riscv/qemuriscv.inc b/meta/conf/machine/include/riscv/qemuriscv.inc
index df35f2808f..a42346f361 100644
--- a/meta/conf/machine/include/riscv/qemuriscv.inc
+++ b/meta/conf/machine/include/riscv/qemuriscv.inc
@@ -36,16 +36,3 @@ QB_TCPSERIAL_OPT = " -device virtio-serial-device -chardev socket,id=virtcon,por
# Add the 'virtio-rng-pci' device otherwise the guest may run out of entropy
QB_OPT_APPEND = " -object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-device,rng=rng0"
-BAD_RECOMMENDATIONS += "\
- libcxx-dev \
- libcxx-staticdev \
- compiler-rt-dev \
- compiler-rt-staticdev \
-"
-
-ASSUME_PROVIDED += "\
- libcxx-dev \
- libcxx-staticdev \
- compiler-rt-dev \
- compiler-rt-staticdev \
-"
diff --git a/meta/conf/machine/include/tune-cortexa57-cortexa53.inc b/meta/conf/machine/include/tune-cortexa57-cortexa53.inc
new file mode 100644
index 0000000000..d05e93f51e
--- /dev/null
+++ b/meta/conf/machine/include/tune-cortexa57-cortexa53.inc
@@ -0,0 +1,18 @@
+DEFAULTTUNE ?= "cortexa57-cortexa53"
+require conf/machine/include/arm/arch-armv8a.inc
+
+TUNEVALID[cortexa57-cortexa53] = "Enable big.LITTLE Cortex-A57.Cortex-A53 specific processor optimizations"
+TUNECONFLICTS[aarch64] = "armv4 armv5 armv6 armv7 armv7a"
+
+TUNE_CCARGS .= "${@bb.utils.contains("TUNE_FEATURES", "aarch64", " -march=armv8-a", "" ,d)}"
+
+MACHINEOVERRIDES =. "${@bb.utils.contains("TUNE_FEATURES", "cortexa57-cortexa53", "cortexa57-cortexa53:", "" ,d)}"
+
+TUNE_CCARGS .= "${@bb.utils.contains("TUNE_FEATURES", "cortexa57-cortexa53", " -mtune=cortex-a57.cortex-a53", "", d)}"
+
+# Little Endian base configs
+AVAILTUNES += "cortexa57-cortexa53"
+ARMPKGARCH_tune-cortexa57-cortexa53 = "cortexa57-cortexa53"
+TUNE_FEATURES_tune-cortexa57-cortexa53 = "${TUNE_FEATURES_tune-aarch64} cortexa57-cortexa53"
+PACKAGE_EXTRA_ARCHS_tune-cortexa57-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-aarch64} cortexa57-cortexa53"
+BASE_LIB_tune-cortexa57-cortexa53 = "lib64"
diff --git a/meta/conf/machine/qemuriscv64.conf b/meta/conf/machine/qemuriscv64.conf
index 99b48b309b..b45fdd556d 100644
--- a/meta/conf/machine/qemuriscv64.conf
+++ b/meta/conf/machine/qemuriscv64.conf
@@ -5,5 +5,5 @@
require conf/machine/include/riscv/qemuriscv.inc
EXTRA_IMAGEDEPENDS += "u-boot"
-UBOOT_MACHINE = "qemu-riscv64_defconfig"
+UBOOT_MACHINE = "qemu-riscv64_smode_defconfig"
UBOOT_ELF = "u-boot"
diff --git a/meta/conf/multilib.conf b/meta/conf/multilib.conf
index 65a28ddbd2..cfed3fbbd0 100644
--- a/meta/conf/multilib.conf
+++ b/meta/conf/multilib.conf
@@ -29,4 +29,4 @@ PKG_CONFIG_PATH[vardepvalueexclude] = ":${WORKDIR}/recipe-sysroot/${datadir}/pkg
# These recipes don't need multilib variants, the ${BPN} PROVIDES/RPROVIDES
# ${MLPREFIX}${BPN}
-NON_MULTILIB_RECIPES = "grub grub-efi make-mod-scripts"
+NON_MULTILIB_RECIPES = "grub grub-efi make-mod-scripts ovmf"
diff --git a/meta/conf/sanity.conf b/meta/conf/sanity.conf
index 92e1886990..8b2f655394 100644
--- a/meta/conf/sanity.conf
+++ b/meta/conf/sanity.conf
@@ -3,7 +3,7 @@
# See sanity.bbclass
#
# Expert users can confirm their sanity with "touch conf/sanity.conf"
-BB_MIN_VERSION = "1.43.1"
+BB_MIN_VERSION = "1.43.2"
SANITY_ABIFILE = "${TMPDIR}/abi_version"
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index 708e1b388e..5b28774c98 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -413,7 +413,7 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index e25122e815..f02fbe9fba 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -24,6 +24,8 @@ class ClassExtender(object):
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
+ if name.startswith("/"):
+ return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index cb663b21c6..31a84f5b06 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -177,7 +177,7 @@ def generate_locked_sigs(sigfile, d):
tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()]
bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
with open(lockedsigs, 'r') as infile:
bb.utils.mkdirhier(os.path.dirname(pruned_output))
with open(pruned_output, 'w') as f:
@@ -187,7 +187,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output
if line.endswith('\\\n'):
splitval = line.strip().split(':')
if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
+ if onlynative:
+ if 'nativesdk' in splitval[0]:
+ f.write(line)
+ else:
+ f.write(line)
else:
f.write(line)
invalue = False
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index 4f2b419edc..43e46380d7 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -110,12 +110,12 @@ def distro_identifier(adjust_hook=None):
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
+ return "unknown"
+ # Filter out any non-alphanumerics and convert to lowercase
+ distro_id = re.sub(r'\W', '', distro_id).lower()
if release:
- id_str = '{0}-{1}'.format(distro_id.lower(), release)
+ id_str = '{0}-{1}'.format(distro_id, release)
else:
id_str = distro_id
return id_str.replace(' ','-').replace('/','-')
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index 7c373715ad..c7135ce918 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -298,7 +298,7 @@ class DpkgIndexer(Indexer):
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
+
index_cmds.append(cmd)
deb_dirs_found = True
@@ -570,6 +570,8 @@ class PackageManager(object, metaclass=ABCMeta):
for lang in split_linguas:
globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
if globs is None:
return
@@ -655,7 +657,7 @@ def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencie
pn = d.getVar("PN")
seendirs = set()
multilibs = {}
-
+
bb.utils.remove(subrepo_dir, recurse=True)
bb.utils.mkdirhier(subrepo_dir)
@@ -1006,8 +1008,8 @@ class RpmPM(PackageManager):
def load_old_install_solution(self):
if not os.path.exists(self.solution_manifest):
return []
-
- return open(self.solution_manifest, 'r').read().split()
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
def _script_num_prefix(self, path):
files = os.listdir(path)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index cbde380b03..a82085a792 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -17,9 +17,8 @@ def read_pkgdatafile(fn):
if os.access(fn, os.R_OK):
import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
+ with open(fn, 'r') as f:
+ lines = f.readlines()
r = re.compile("([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index b4fbdb799e..d02a274812 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -88,10 +88,6 @@ class Sdk(object, metaclass=ABCMeta):
bb.warn("cannot remove SDK dir: %s" % path)
def install_locales(self, pm):
- # This is only relevant for glibc
- if self.d.getVar("TCLIBC") != "glibc":
- return
-
linguas = self.d.getVar("SDKIMAGE_LINGUAS")
if linguas:
import fnmatch
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 0c7a6f5ed8..c566ce5a0c 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -90,8 +90,7 @@ class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
+class SignatureGeneratorOEBasicHashMixIn(object):
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
@@ -129,12 +128,11 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
+ return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+ self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
+ super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
sigfile = os.getcwd() + "/locked-sigs.inc"
@@ -171,10 +169,11 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
h_locked = self.lockedsigs[recipename][task][0]
var = self.lockedsigs[recipename][task][1]
self.lockedhashes[tid] = h_locked
+ unihash = super().get_unihash(tid)
self.taskhash[tid] = h_locked
#bb.warn("Using %s %s %s" % (recipename, task, h))
- if h != h_locked:
+ if h != h_locked and h_locked != unihash:
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
@@ -182,6 +181,11 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
#bb.warn("%s %s %s" % (recipename, task, h))
return h
+ def get_unihash(self, tid):
+ if tid in self.lockedhashes:
+ return self.lockedhashes[tid]
+ return super().get_unihash(tid)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
tid = fn + ":" + task
if tid in self.lockedhashes:
@@ -211,7 +215,7 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
(_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
if tid not in self.taskhash:
continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[tid] + " \\\n")
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
@@ -257,18 +261,17 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
if error_msgs:
bb.fatal("\n".join(error_msgs))
-class SignatureGeneratorOEEquivHash(bb.siggen.SignatureGeneratorUniHashMixIn, SignatureGeneratorOEBasicHash):
+class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+
+class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
name = "OEEquivHash"
def init_rundepcheck(self, data):
super().init_rundepcheck(data)
- autostart = data.getVar('BB_HASHSERVE')
- if autostart:
- self.server = "http://" + autostart
- else:
- self.server = data.getVar('SSTATE_HASHEQUIV_SERVER')
+ self.server = data.getVar('BB_HASHSERVE')
if not self.server:
- bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_SERVER or BB_HASHSERVE to be set")
+ bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
if not self.method:
bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 9bda3efdc7..a1daa2bed6 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -55,7 +55,7 @@ class XTerminal(Terminal):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
+ command = 'gnome-terminal -t "{title}" -- {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index 77ee7ee541..bbbabafbf6 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -154,7 +154,8 @@ def path(value, relativeto='', normalize='true', mustexist='false'):
if boolean(mustexist):
try:
- open(value, 'r')
+ with open(value, 'r'):
+ pass
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
@@ -183,4 +184,3 @@ def qemu_use_kvm(kvm, target_arch):
elif build_arch == target_arch:
use_kvm = True
return use_kvm
-
diff --git a/meta/lib/oeqa/core/case.py b/meta/lib/oeqa/core/case.py
index aca144e9dc..aae451fef2 100644
--- a/meta/lib/oeqa/core/case.py
+++ b/meta/lib/oeqa/core/case.py
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: MIT
#
+import base64
+import zlib
import unittest
from oeqa.core.exception import OEQAMissingVariable
@@ -49,3 +51,50 @@ class OETestCase(unittest.TestCase):
for d in self.decorators:
d.tearDownDecorator()
self.tearDownMethod()
+
+class OEPTestResultTestCase:
+ """
+ Mix-in class providing helper functions for interacting with extraresults
+ when storing ptestresult data.
+ """
+ @staticmethod
+ def _compress_log(log):
+ logdata = log.encode("utf-8") if isinstance(log, str) else log
+ logdata = zlib.compress(logdata)
+ logdata = base64.b64encode(logdata).decode("utf-8")
+ return {"compressed" : logdata}
+
+ def ptest_rawlog(self, log):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+ self.extraresults["ptestresult.rawlogs"] = {"log" : self._compress_log(log)}
+
+ def ptest_section(self, section, duration = None, log = None, logfile = None, exitcode = None):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+
+ sections = self.extraresults.get("ptestresult.sections")
+ if section not in sections:
+ sections[section] = {}
+
+ if log is not None:
+ sections[section]["log"] = self._compress_log(log)
+ elif logfile is not None:
+ with open(logfile, "rb") as f:
+ sections[section]["log"] = self._compress_log(f.read())
+
+ if duration is not None:
+ sections[section]["duration"] = duration
+ if exitcode is not None:
+ sections[section]["exitcode"] = exitcode
+
+ def ptest_result(self, section, test, result):
+ if not hasattr(self, "extraresults"):
+ self.extraresults = {"ptestresult.sections" : {}}
+
+ sections = self.extraresults.get("ptestresult.sections")
+ if section not in sections:
+ sections[section] = {}
+ resultname = "ptestresult.{}.{}".format(section, test)
+ self.extraresults[resultname] = {"status" : result}
+
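The binutils.py, gcc.py and glibc.py selftest changes later in this patch show how the new mix-in is consumed; condensed into a usage sketch (the test name, section and results below are made up for illustration):

# Usage sketch for OEPTestResultTestCase (illustrative test; real consumers are
# the binutils/gcc/glibc selftests modified further down in this patch).
from oeqa.core.case import OEPTestResultTestCase
from oeqa.selftest.case import OESelftestTestCase

class ExampleToolchainTest(OESelftestTestCase, OEPTestResultTestCase):
    def test_example_suite(self):
        ptestsuite = "example-suite"
        # record per-section metadata (duration, log/logfile and exitcode are optional)
        self.ptest_section(ptestsuite, duration=12, log="PASS: test_one\nFAIL: test_two")
        # record individual test outcomes under that section
        self.ptest_result(ptestsuite, "test_one", "PASS")
        self.ptest_result(ptestsuite, "test_two", "FAIL")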
diff --git a/meta/lib/oeqa/core/decorator/data.py b/meta/lib/oeqa/core/decorator/data.py
index babc9789d6..12d462f202 100644
--- a/meta/lib/oeqa/core/decorator/data.py
+++ b/meta/lib/oeqa/core/decorator/data.py
@@ -113,3 +113,21 @@ class skipIfNotFeature(OETestDecorator):
self.logger.debug(msg)
if not has_feature(self.case.td, self.value):
self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfFeature(OETestDecorator):
+ """
+ Skip test based on DISTRO_FEATURES.
+
+ value must not be in distro features or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is not in DISTRO_FEATURES '
+ 'or IMAGE_FEATURES' % (self.value))
+ self.logger.debug(msg)
+ if has_feature(self.case.td, self.value):
+ self.case.skipTest(self.msg)
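The opkg.py and systemd.py runtime tests further down use this new decorator; a trimmed usage sketch (the test body is illustrative, only the decorator call mirrors the patch):

# Usage sketch for skipIfFeature (hypothetical test body).
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.data import skipIfFeature

class ExampleRuntimeTest(OERuntimeTestCase):
    @skipIfFeature('read-only-rootfs',
                   'Test does not work with read-only-rootfs in IMAGE_FEATURES')
    def test_write_to_rootfs(self):
        # only runs when the image is writable
        status, output = self.target.run('touch /etc/example-marker')
        self.assertEqual(status, 0, msg=output)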
diff --git a/meta/lib/oeqa/core/utils/concurrencytest.py b/meta/lib/oeqa/core/utils/concurrencytest.py
index fa6fa34b0e..0f7b3dcc11 100644
--- a/meta/lib/oeqa/core/utils/concurrencytest.py
+++ b/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -78,29 +78,29 @@ class ProxyTestResult:
def __init__(self, target):
self.result = target
- def _addResult(self, method, test, *args, **kwargs):
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
return method(test, *args, **kwargs)
- def addError(self, test, *args, **kwargs):
- self._addResult(self.result.addError, test, *args, **kwargs)
+ def addError(self, test, err = None, **kwargs):
+ self._addResult(self.result.addError, test, err, exception = True, **kwargs)
- def addFailure(self, test, *args, **kwargs):
- self._addResult(self.result.addFailure, test, *args, **kwargs)
+ def addFailure(self, test, err = None, **kwargs):
+ self._addResult(self.result.addFailure, test, err, exception = True, **kwargs)
- def addSuccess(self, test, *args, **kwargs):
- self._addResult(self.result.addSuccess, test, *args, **kwargs)
+ def addSuccess(self, test, **kwargs):
+ self._addResult(self.result.addSuccess, test, **kwargs)
- def addExpectedFailure(self, test, *args, **kwargs):
- self._addResult(self.result.addExpectedFailure, test, *args, **kwargs)
+ def addExpectedFailure(self, test, err = None, **kwargs):
+ self._addResult(self.result.addExpectedFailure, test, err, exception = True, **kwargs)
- def addUnexpectedSuccess(self, test, *args, **kwargs):
- self._addResult(self.result.addUnexpectedSuccess, test, *args, **kwargs)
+ def addUnexpectedSuccess(self, test, **kwargs):
+ self._addResult(self.result.addUnexpectedSuccess, test, **kwargs)
def __getattr__(self, attr):
return getattr(self.result, attr)
class ExtraResultsDecoderTestResult(ProxyTestResult):
- def _addResult(self, method, test, *args, **kwargs):
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
if "details" in kwargs and "extraresults" in kwargs["details"]:
if isinstance(kwargs["details"]["extraresults"], Content):
kwargs = kwargs.copy()
@@ -114,7 +114,7 @@ class ExtraResultsDecoderTestResult(ProxyTestResult):
return method(test, *args, **kwargs)
class ExtraResultsEncoderTestResult(ProxyTestResult):
- def _addResult(self, method, test, *args, **kwargs):
+ def _addResult(self, method, test, *args, exception = False, **kwargs):
if hasattr(test, "extraresults"):
extras = lambda : [json.dumps(test.extraresults).encode()]
kwargs = kwargs.copy()
@@ -123,6 +123,11 @@ class ExtraResultsEncoderTestResult(ProxyTestResult):
else:
kwargs["details"] = kwargs["details"].copy()
kwargs["details"]["extraresults"] = Content(ContentType("application", "json", {'charset': 'utf8'}), extras)
+ # if using details, need to encode any exceptions into the details obj,
+ # testtools does not handle "err" and "details" together.
+ if "details" in kwargs and exception and (len(args) >= 1 and args[0] is not None):
+ kwargs["details"]["traceback"] = testtools.content.TracebackContent(args[0], test)
+ args = []
return method(test, *args, **kwargs)
#
@@ -216,7 +221,7 @@ def removebuilddir(d):
while delay and os.path.exists(d + "/bitbake.lock"):
time.sleep(1)
delay = delay - 1
- bb.utils.prunedir(d)
+ bb.utils.prunedir(d, ionice=True)
def fork_for_tests(concurrency_num, suite):
result = []
diff --git a/meta/lib/oeqa/runtime/cases/df.py b/meta/lib/oeqa/runtime/cases/df.py
index d8d79f32ea..89fd0fb901 100644
--- a/meta/lib/oeqa/runtime/cases/df.py
+++ b/meta/lib/oeqa/runtime/cases/df.py
@@ -11,7 +11,7 @@ class DfTest(OERuntimeTestCase):
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
def test_df(self):
- cmd = "df / | sed -n '2p' | awk '{print $4}'"
+ cmd = "df -P / | sed -n '2p' | awk '{print $4}'"
(status,output) = self.target.run(cmd)
msg = 'Not enough space on image. Current size is %s' % output
self.assertTrue(int(output)>5120, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/dnf.py b/meta/lib/oeqa/runtime/cases/dnf.py
index 80cc86a4fb..de3759995e 100644
--- a/meta/lib/oeqa/runtime/cases/dnf.py
+++ b/meta/lib/oeqa/runtime/cases/dnf.py
@@ -117,6 +117,7 @@ class DnfRepoTest(DnfTest):
@OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
@skipIfInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when not enable usrmerge')
+ @OEHasPackage('busybox')
def test_dnf_installroot(self):
rootpath = '/home/root/chroot/test'
#Copy necessary files to avoid errors with not yet installed tools on
@@ -143,6 +144,7 @@ class DnfRepoTest(DnfTest):
@OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
@skipIfNotInDataVar('DISTRO_FEATURES', 'usrmerge', 'Test run when enable usrmege')
+ @OEHasPackage('busybox')
def test_dnf_installroot_usrmerge(self):
rootpath = '/home/root/chroot/test'
#Copy necessary files to avoid errors with not yet installed tools on
diff --git a/meta/lib/oeqa/runtime/cases/opkg.py b/meta/lib/oeqa/runtime/cases/opkg.py
index bb8b6d99d2..750706161b 100644
--- a/meta/lib/oeqa/runtime/cases/opkg.py
+++ b/meta/lib/oeqa/runtime/cases/opkg.py
@@ -5,7 +5,7 @@
import os
from oeqa.utils.httpserver import HTTPService
from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature, skipIfFeature
from oeqa.runtime.decorator.package import OEHasPackage
class OpkgTest(OERuntimeTestCase):
@@ -45,6 +45,8 @@ class OpkgRepoTest(OpkgTest):
'Test requires package-management to be in IMAGE_FEATURES')
@skipIfNotDataVar('IMAGE_PKGTYPE', 'ipk',
'IPK is not the primary package manager')
+ @skipIfFeature('read-only-rootfs',
+ 'Test does not work with read-only-rootfs in IMAGE_FEATURES')
@OEHasPackage(['opkg'])
def test_opkg_install_from_repo(self):
self.setup_source_config_for_package_install()
diff --git a/meta/lib/oeqa/runtime/cases/systemd.py b/meta/lib/oeqa/runtime/cases/systemd.py
index c11fa49b07..7c44abe8ed 100644
--- a/meta/lib/oeqa/runtime/cases/systemd.py
+++ b/meta/lib/oeqa/runtime/cases/systemd.py
@@ -9,7 +9,7 @@ from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfDataVar, skipIfNotDataVar
from oeqa.runtime.decorator.package import OEHasPackage
-from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.core.decorator.data import skipIfNotFeature, skipIfFeature
class SystemdTest(OERuntimeTestCase):
@@ -114,12 +114,26 @@ class SystemdServiceTests(SystemdTest):
self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
@OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ @skipIfFeature('read-only-rootfs',
+ 'Test is only meant to run without read-only-rootfs in IMAGE_FEATURES')
def test_systemd_disable_enable(self):
self.systemctl('disable', 'avahi-daemon.service')
self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
self.systemctl('enable', 'avahi-daemon.service')
self.systemctl('is-enabled', 'avahi-daemon.service')
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ @skipIfNotFeature('read-only-rootfs',
+ 'Test is only meant to run with read-only-rootfs in IMAGE_FEATURES')
+ def test_systemd_disable_enable_ro(self):
+ status = self.target.run('mount -orw,remount /')[0]
+ self.assertTrue(status == 0, msg='Remounting / as r/w failed')
+ try:
+ self.test_systemd_disable_enable()
+ finally:
+ status = self.target.run('mount -oro,remount /')[0]
+ self.assertTrue(status == 0, msg='Remounting / as r/o failed')
+
class SystemdJournalTests(SystemdTest):
@OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
index 77d58eefa7..ef738a3359 100644
--- a/meta/lib/oeqa/runtime/context.py
+++ b/meta/lib/oeqa/runtime/context.py
@@ -138,7 +138,7 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
def add_controller_list(path):
if not os.path.exists(os.path.join(path, '__init__.py')):
raise OSError('Controllers directory %s exists but is missing __init__.py' % path)
- files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
+ files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_') and not f.startswith('.#')])
for f in files:
module = 'oeqa.controllers.' + f[:-3]
if module not in controllerslist:
diff --git a/meta/lib/oeqa/sdk/cases/buildepoxy.py b/meta/lib/oeqa/sdk/cases/buildepoxy.py
index f3d207c6dd..4211955f8d 100644
--- a/meta/lib/oeqa/sdk/cases/buildepoxy.py
+++ b/meta/lib/oeqa/sdk/cases/buildepoxy.py
@@ -32,7 +32,9 @@ class EpoxyTest(OESDKTestCase):
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
- self._run("meson -Degl=no -Dglx=no -Dx11=false {build} {source}".format(**dirs))
+ log = self._run("meson -Degl=no -Dglx=no -Dx11=false {build} {source}".format(**dirs))
+ # Check that Meson thinks we're doing a cross build and not a native
+ self.assertIn("Build type: cross build", log)
self._run("ninja -C {build} -v".format(**dirs))
self._run("DESTDIR={install} ninja -C {build} -v install".format(**dirs))
diff --git a/meta/lib/oeqa/selftest/case.py b/meta/lib/oeqa/selftest/case.py
index d207a0af0c..ac3308d8a4 100644
--- a/meta/lib/oeqa/selftest/case.py
+++ b/meta/lib/oeqa/selftest/case.py
@@ -193,13 +193,20 @@ to ensure accurate results.")
self.logger.debug("Adding path '%s' to be cleaned up when test is over" % path)
self._track_for_cleanup.append(path)
- def write_config(self, data):
- """Write to <builddir>/conf/selftest.inc"""
+ def write_config(self, data, multiconfig=None):
+ """Write to config file"""
+ if multiconfig:
+ multiconfigdir = "%s/conf/multiconfig" % self.builddir
+ os.makedirs(multiconfigdir, exist_ok=True)
+ dest_path = '%s/%s.conf' % (multiconfigdir, multiconfig)
+ self.track_for_cleanup(dest_path)
+ else:
+ dest_path = self.testinc_path
- self.logger.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
- ftools.write_file(self.testinc_path, data)
+ self.logger.debug("Writing to: %s\n%s\n" % (dest_path, data))
+ ftools.write_file(dest_path, data)
- if self.tc.custommachine and 'MACHINE' in data:
+ if not multiconfig and self.tc.custommachine and 'MACHINE' in data:
machine = get_bb_var('MACHINE')
self.logger.warning('MACHINE overridden: %s' % machine)
diff --git a/meta/lib/oeqa/selftest/cases/bblayers.py b/meta/lib/oeqa/selftest/cases/bblayers.py
index 954488dfd1..f131d9856c 100644
--- a/meta/lib/oeqa/selftest/cases/bblayers.py
+++ b/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -14,21 +14,21 @@ class BitbakeLayers(OESelftestTestCase):
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
- self.assertTrue('aspell' in result.output, msg = "No dependencies were shown. bitbake-layers show-cross-depends output: %s" % result.output)
+ self.assertIn('aspell', result.output)
def test_bitbakelayers_showlayers(self):
result = runCmd('bitbake-layers show-layers')
- self.assertTrue('meta-selftest' in result.output, msg = "No layers were shown. bitbake-layers show-layers output: %s" % result.output)
+ self.assertIn('meta-selftest', result.output)
def test_bitbakelayers_showappends(self):
recipe = "xcursor-transparent-theme"
bb_file = self.get_recipe_basename(recipe)
result = runCmd('bitbake-layers show-appends')
- self.assertTrue(bb_file in result.output, msg="%s file was not recognised. bitbake-layers show-appends output: %s" % (bb_file, result.output))
+ self.assertIn(bb_file, result.output)
def test_bitbakelayers_showoverlayed(self):
result = runCmd('bitbake-layers show-overlayed')
- self.assertTrue('aspell' in result.output, msg="aspell overlayed recipe was not recognised bitbake-layers show-overlayed %s" % result.output)
+ self.assertIn('aspell', result.output)
def test_bitbakelayers_flatten(self):
recipe = "xcursor-transparent-theme"
diff --git a/meta/lib/oeqa/selftest/cases/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index 8e59bafae3..dc423ec439 100644
--- a/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -44,7 +44,7 @@ class BitbakeTests(OESelftestTestCase):
find_build_completed = re.search(r"Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
- self.assertFalse('Test for bb.event.InvalidEvent' in result.output, msg = "\"Test for bb.event.InvalidEvent\" message found during bitbake process. bitbake output: %s" % result.output)
+ self.assertNotIn('Test for bb.event.InvalidEvent', result.output)
def test_local_sstate(self):
bitbake('m4-native')
@@ -59,7 +59,7 @@ class BitbakeTests(OESelftestTestCase):
def test_bitbake_invalid_target(self):
result = bitbake('asdf', ignore_status=True)
- self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output, msg = "Though no 'asdf' target exists, bitbake didn't output any err. message. bitbake output: %s" % result.output)
+ self.assertIn("ERROR: Nothing PROVIDES 'asdf'", result.output)
def test_warnings_errors(self):
result = bitbake('-b asdf', ignore_status=True)
@@ -118,11 +118,12 @@ class BitbakeTests(OESelftestTestCase):
self.assertIn(task, result.output, msg="Couldn't find %s task.")
def test_bitbake_g(self):
- result = bitbake('-g core-image-minimal')
+ recipe = 'base-files'
+ result = bitbake('-g %s' % recipe)
for f in ['pn-buildlist', 'task-depends.dot']:
self.addCleanup(os.remove, f)
self.assertTrue('Task dependencies saved to \'task-depends.dot\'' in result.output, msg = "No task dependency \"task-depends.dot\" file was generated for the given task target. bitbake output: %s" % result.output)
- self.assertTrue('busybox' in ftools.read_file(os.path.join(self.builddir, 'task-depends.dot')), msg = "No \"busybox\" dependency found in task-depends.dot file.")
+ self.assertIn(recipe, ftools.read_file(os.path.join(self.builddir, 'task-depends.dot')))
def test_image_manifest(self):
bitbake('core-image-minimal')
@@ -146,8 +147,7 @@ INHERIT_remove = \"report-error\"
bitbake('-ccleanall man-db')
self.delete_recipeinc('man-db')
self.assertEqual(result.status, 1, msg="Command succeded when it should have failed. bitbake output: %s" % result.output)
- self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output, msg = "\"invalid\" file \
-doesn't exist, yet no error message encountered. bitbake output: %s" % result.output)
+ self.assertIn('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:', result.output)
line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
@@ -172,7 +172,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
def test_environment(self):
self.write_config("TEST_ENV=\"localconf\"")
result = runCmd('bitbake -e | grep TEST_ENV=')
- self.assertTrue('localconf' in result.output, msg = "bitbake didn't report any value for TEST_ENV variable. To test, run 'bitbake -e | grep TEST_ENV='")
+ self.assertIn('localconf', result.output)
def test_dry_run(self):
result = runCmd('bitbake -n m4-native')
@@ -192,10 +192,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.track_for_cleanup(preconf)
ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
- self.assertTrue('prefile' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration. ")
+ self.assertIn('prefile', result.output)
self.write_config("TEST_PREFILE=\"localconf\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
- self.assertTrue('localconf' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration.")
+ self.assertIn('localconf', result.output)
def test_postfile(self):
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
@@ -203,7 +203,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
self.write_config("TEST_POSTFILE=\"localconf\"")
result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
- self.assertTrue('postfile' in result.output, "Postconfigure file \"postfile.conf\"was not taken into consideration.")
+ self.assertIn('postfile', result.output)
def test_checkuri(self):
result = runCmd('bitbake -c checkuri m4')
diff --git a/meta/lib/oeqa/selftest/cases/binutils.py b/meta/lib/oeqa/selftest/cases/binutils.py
index 9bc752040f..821f52f5a8 100644
--- a/meta/lib/oeqa/selftest/cases/binutils.py
+++ b/meta/lib/oeqa/selftest/cases/binutils.py
@@ -4,6 +4,7 @@ import sys
import re
import logging
from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars
@@ -15,7 +16,7 @@ def parse_values(content):
break
@OETestTag("toolchain-user", "toolchain-system")
-class BinutilsCrossSelfTest(OESelftestTestCase):
+class BinutilsCrossSelfTest(OESelftestTestCase, OEPTestResultTestCase):
def test_binutils(self):
self.run_binutils("binutils")
@@ -36,14 +37,14 @@ class BinutilsCrossSelfTest(OESelftestTestCase):
bitbake("{0} -c check".format(recipe))
- ptestsuite = "binutils-{}".format(suite) if suite != "binutils" else suite
- self.extraresults = {"ptestresult.sections" : {ptestsuite : {}}}
-
sumspath = os.path.join(builddir, suite, "{0}.sum".format(suite))
if not os.path.exists(sumspath):
sumspath = os.path.join(builddir, suite, "testsuite", "{0}.sum".format(suite))
+ logpath = os.path.splitext(sumspath)[0] + ".log"
+ ptestsuite = "binutils-{}".format(suite) if suite != "binutils" else suite
+ self.ptest_section(ptestsuite, logfile = logpath)
with open(sumspath, "r") as f:
for test, result in parse_values(f):
- self.extraresults["ptestresult.{}.{}".format(ptestsuite, test)] = {"status" : result}
+ self.ptest_result(ptestsuite, test, result)
diff --git a/meta/lib/oeqa/selftest/cases/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index 6fe145c994..3a25da2033 100644
--- a/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -518,8 +518,8 @@ class DevtoolModifyTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean mdadm')
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify mdadm -x %s' % tempdir)
self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
@@ -587,8 +587,8 @@ class DevtoolModifyTests(DevtoolBase):
self.track_for_cleanup(tempdir_m4)
self.track_for_cleanup(builddir_m4)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean mdadm m4')
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.write_recipeinc('m4', 'EXTERNALSRC_BUILD = "%s"\ndo_clean() {\n\t:\n}\n' % builddir_m4)
try:
runCmd('devtool modify mdadm -x %s' % tempdir_mdadm)
@@ -604,6 +604,7 @@ class DevtoolModifyTests(DevtoolBase):
bitbake('mdadm m4 -c buildclean')
assertNoFile(tempdir_mdadm, 'mdadm')
assertNoFile(builddir_m4, 'src/m4')
+ runCmd('echo "#Trigger rebuild" >> %s/Makefile' % tempdir_mdadm)
bitbake('mdadm m4 -c compile')
assertFile(tempdir_mdadm, 'mdadm')
assertFile(builddir_m4, 'src/m4')
@@ -683,8 +684,8 @@ class DevtoolModifyTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. devtool output: %s' % result.output)
@@ -715,8 +716,8 @@ class DevtoolModifyTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
self.assertExists(os.path.join(tempdir, 'configure.ac'), 'Extracted source could not be found')
self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
@@ -1246,8 +1247,8 @@ class DevtoolExtractTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
# Test that deploy-target at this point fails (properly)
result = runCmd('devtool deploy-target -n %s root@localhost' % testrecipe, ignore_status=True)
@@ -1297,8 +1298,8 @@ class DevtoolExtractTests(DevtoolBase):
self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
image = 'core-image-minimal'
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % image)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
bitbake('%s -c clean' % image)
# Add target and native recipes to workspace
recipes = ['mdadm', 'parted-native']
@@ -1707,8 +1708,8 @@ class DevtoolUpgradeTests(DevtoolBase):
self.track_for_cleanup(tempdir)
self.track_for_cleanup(tempdir_cfg)
self.track_for_cleanup(self.workspacedir)
- self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % kernel_provider)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
#Step 1
#Only the kernel config file is generated here, instead of building the whole
#kernel, to keep the execution time of this test case down.
diff --git a/meta/lib/oeqa/selftest/cases/gcc.py b/meta/lib/oeqa/selftest/cases/gcc.py
index 2c25b5904c..5a917b9c44 100644
--- a/meta/lib/oeqa/selftest/cases/gcc.py
+++ b/meta/lib/oeqa/selftest/cases/gcc.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: MIT
import os
from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars, runqemu, Command
@@ -11,7 +12,7 @@ def parse_values(content):
yield i[len(v) + 2:].strip(), v
break
-class GccSelfTestBase(OESelftestTestCase):
+class GccSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
def check_skip(self, suite):
targets = get_bb_var("RUNTIMETARGET", "gcc-runtime").split()
if suite not in targets:
@@ -41,20 +42,20 @@ class GccSelfTestBase(OESelftestTestCase):
bb_vars = get_bb_vars(["B", "TARGET_SYS"], recipe)
builddir, target_sys = bb_vars["B"], bb_vars["TARGET_SYS"]
- self.extraresults = {"ptestresult.sections" : {}}
for suite in suites:
sumspath = os.path.join(builddir, "gcc", "testsuite", suite, "{0}.sum".format(suite))
if not os.path.exists(sumspath): # check in target dirs
sumspath = os.path.join(builddir, target_sys, suite, "testsuite", "{0}.sum".format(suite))
if not os.path.exists(sumspath): # handle libstdc++-v3 -> libstdc++
sumspath = os.path.join(builddir, target_sys, suite, "testsuite", "{0}.sum".format(suite.split("-")[0]))
+ logpath = os.path.splitext(sumspath)[0] + ".log"
ptestsuite = "gcc-{}".format(suite) if suite != "gcc" else suite
ptestsuite = ptestsuite + "-user" if ssh is None else ptestsuite
- self.extraresults["ptestresult.sections"][ptestsuite] = {}
+ self.ptest_section(ptestsuite, logfile = logpath)
with open(sumspath, "r") as f:
for test, result in parse_values(f):
- self.extraresults["ptestresult.{}.{}".format(ptestsuite, test)] = {"status" : result}
+ self.ptest_result(ptestsuite, test, result)
def run_check_emulated(self, *args, **kwargs):
# build core-image-minimal with required packages
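
Both the gcc.py hunk above and the glibc.py hunk below replace the hand-rolled extraresults bookkeeping with the new OEPTestResultTestCase mix-in from oeqa.core.case (added elsewhere in this series, see the diffstat). The following is only a rough sketch, inferred from the call sites, of what such a mix-in plausibly provides; the method bodies are an assumption, not the actual implementation in meta/lib/oeqa/core/case.py.

class OEPTestResultTestCase:
    """Illustrative mix-in that collects ptest-style results into self.extraresults."""

    def _ensure_results(self):
        # Lazily create the results dictionary in the format the result
        # collection code expects (mirrors the dict the old code built by hand).
        if not hasattr(self, "extraresults"):
            self.extraresults = {"ptestresult.sections": {}}
        return self.extraresults

    def ptest_section(self, section, duration=None, logfile=None):
        # Register a result section, optionally recording its duration and log.
        sections = self._ensure_results().setdefault("ptestresult.sections", {})
        sections.setdefault(section, {})
        if duration is not None:
            sections[section]["duration"] = duration
        if logfile is not None:
            sections[section]["log"] = logfile  # the real helper may store/encode the log contents

    def ptest_result(self, section, test, result):
        # Record an individual test outcome under its section.
        self._ensure_results()["ptestresult.{}.{}".format(section, test)] = {"status": result}
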
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
index 2e42485dd6..c687f6ef93 100644
--- a/meta/lib/oeqa/selftest/cases/glibc.py
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -2,6 +2,7 @@
import os
import contextlib
from oeqa.core.decorator import OETestTag
+from oeqa.core.case import OEPTestResultTestCase
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake, get_bb_var, get_bb_vars, runqemu, Command
from oeqa.utils.nfs import unfs_server
@@ -13,7 +14,7 @@ def parse_values(content):
yield i[len(v) + 2:].strip(), v
break
-class GlibcSelfTestBase(OESelftestTestCase):
+class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
def run_check(self, ssh = None):
# configure ssh target
features = []
@@ -31,10 +32,10 @@ class GlibcSelfTestBase(OESelftestTestCase):
builddir = get_bb_var("B", "glibc-testsuite")
ptestsuite = "glibc-user" if ssh is None else "glibc"
- self.extraresults = {"ptestresult.sections" : {ptestsuite : {}}}
+ self.ptest_section(ptestsuite)
with open(os.path.join(builddir, "tests.sum"), "r") as f:
for test, result in parse_values(f):
- self.extraresults["ptestresult.{}.{}".format(ptestsuite, test)] = {"status" : result}
+ self.ptest_result(ptestsuite, test, result)
def run_check_emulated(self):
with contextlib.ExitStack() as s:
diff --git a/meta/lib/oeqa/selftest/cases/imagefeatures.py b/meta/lib/oeqa/selftest/cases/imagefeatures.py
index afc629f29c..ef2eefa860 100644
--- a/meta/lib/oeqa/selftest/cases/imagefeatures.py
+++ b/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -124,7 +124,7 @@ class ImageFeatures(OESelftestTestCase):
# check if result image is sparse
image_stat = os.stat(image_path)
- self.assertTrue(image_stat.st_size > image_stat.st_blocks * 512)
+ self.assertGreater(image_stat.st_size, image_stat.st_blocks * 512)
# check if the resulting gzip is valid
self.assertTrue(runCmd('gzip -t %s' % gzip_path))
@@ -161,7 +161,12 @@ class ImageFeatures(OESelftestTestCase):
sysroot = get_bb_var('STAGING_DIR_NATIVE', 'core-image-minimal')
result = runCmd('qemu-img info --output json %s' % image_path,
native_sysroot=sysroot)
- self.assertTrue(json.loads(result.output).get('format') == itype)
+ try:
+ data = json.loads(result.output)
+ self.assertEqual(data.get('format'), itype,
+ msg="Unexpected format in '%s'" % (result.output))
+ except json.decoder.JSONDecodeError:
+ self.fail("Could not parse '%ss'" % result.output)
def test_long_chain_conversion(self):
"""
diff --git a/meta/lib/oeqa/selftest/cases/incompatible_lic.py b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
index 8fb93af8a8..904b5b4094 100644
--- a/meta/lib/oeqa/selftest/cases/incompatible_lic.py
+++ b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
@@ -39,3 +39,55 @@ class IncompatibleLicenseTests(OESelftestTestCase):
# INCOMPATIBLE_LICENSE contains this license
def test_incompatible_nonspdx_license(self):
self.lic_test('incompatible-nonspdx-license', 'FooLicense', 'FooLicense')
+
+class IncompatibleLicensePerImageTests(OESelftestTestCase):
+ def default_config(self):
+ return """
+IMAGE_INSTALL_append = "bash"
+INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+"""
+
+ def test_bash_default(self):
+ self.write_config(self.default_config())
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash has an incompatible license GPLv3+ and cannot be installed into the image."
+
+ result = bitbake('core-image-minimal', ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ def test_bash_and_license(self):
+ self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " & SomeLicense"')
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash has an incompatible license GPLv3+ & SomeLicense and cannot be installed into the image."
+
+ result = bitbake('core-image-minimal', ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ def test_bash_or_license(self):
+ self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " | SomeLicense"')
+
+ bitbake('core-image-minimal')
+
+ def test_bash_whitelist(self):
+ self.write_config(self.default_config() + '\nWHITELIST_GPL-3.0_pn-core-image-minimal = "bash"')
+
+ bitbake('core-image-minimal')
+
+class NoGPL3InImagesTests(OESelftestTestCase):
+ def test_core_image_minimal(self):
+ self.write_config("""
+INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+""")
+ bitbake('core-image-minimal')
+
+ def test_core_image_full_cmdline(self):
+ self.write_config("""
+INHERIT += "testimage"\n
+INCOMPATIBLE_LICENSE_pn-core-image-full-cmdline = "GPL-3.0 LGPL-3.0"\n
+RDEPENDS_packagegroup-core-full-cmdline-utils_remove = "bash bc coreutils cpio ed findutils gawk grep mc mc-fish mc-helpers mc-helpers-perl sed tar time"\n
+RDEPENDS_packagegroup-core-full-cmdline-dev-utils_remove = "diffutils m4 make patch"\n
+RDEPENDS_packagegroup-core-full-cmdline-multiuser_remove = "gzip"\n
+""")
+ bitbake('core-image-full-cmdline')
+ bitbake('-c testimage core-image-full-cmdline')
+
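
As a side note on what these per-image tests exercise: a package whose LICENSE is an AND expression containing an incompatible component must be rejected, while an OR expression with at least one acceptable alternative can still be installed. The toy evaluator below is purely illustrative (it ignores parentheses, SPDX mapping and whitelists) and is not the code used by license_image.bbclass.

# Toy evaluator, for illustration only -- not the code used by license_image.bbclass.
# It shows why 'GPLv3+ & SomeLicense' is rejected while 'GPLv3+ | SomeLicense'
# can still be installed when GPL-3.0/LGPL-3.0 are listed as incompatible.
def license_ok(license_expr, incompatible):
    def single_ok(lic):
        return lic.strip() not in incompatible

    # OR: one acceptable alternative is enough; AND: every part must be acceptable.
    return any(all(single_ok(part) for part in alt.split('&'))
               for alt in license_expr.split('|'))

incompatible = {"GPLv3+", "GPL-3.0", "LGPL-3.0"}
print(license_ok("GPLv3+ & SomeLicense", incompatible))  # False -> package rejected
print(license_ok("GPLv3+ | SomeLicense", incompatible))  # True  -> package allowed
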
diff --git a/meta/lib/oeqa/selftest/cases/multiconfig.py b/meta/lib/oeqa/selftest/cases/multiconfig.py
index d21bf0a411..39b92f2439 100644
--- a/meta/lib/oeqa/selftest/cases/multiconfig.py
+++ b/meta/lib/oeqa/selftest/cases/multiconfig.py
@@ -3,16 +3,16 @@
#
import os
+import textwrap
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
-import oeqa.utils.ftools as ftools
class MultiConfig(OESelftestTestCase):
def test_multiconfig(self):
"""
- Test that a simple multiconfig build works. This uses the mcextend class and the
- multiconfig-image-packager test recipe to build a core-image-full-cmdline image which
+ Test that a simple multiconfig build works. This uses the mcextend class and the
+ multiconfig-image-packager test recipe to build a core-image-full-cmdline image which
contains a tiny core-image-minimal and a musl core-image-minimal, installed as packages.
"""
@@ -28,20 +28,45 @@ DISTRO = "poky"
TCLIBC = "musl"
TMPDIR = "${TOPDIR}/tmp-mc-musl"
"""
+ self.write_config(muslconfig, 'musl')
tinyconfig = """
MACHINE = "qemux86"
DISTRO = "poky-tiny"
TMPDIR = "${TOPDIR}/tmp-mc-tiny"
"""
-
- multiconfigdir = self.builddir + "/conf/multiconfig"
- os.makedirs(multiconfigdir, exist_ok=True)
- self.track_for_cleanup(multiconfigdir + "/musl.conf")
- ftools.write_file(multiconfigdir + "/musl.conf", muslconfig)
- self.track_for_cleanup(multiconfigdir + "/tiny.conf")
- ftools.write_file(multiconfigdir + "/tiny.conf", tinyconfig)
+ self.write_config(tinyconfig, 'tiny')
# Build a core-image-minimal
bitbake('core-image-full-cmdline')
+ def test_multiconfig_reparse(self):
+ """
+ Test that changes to a multiconfig conf file are correctly detected and
+ cause a reparse/rebuild of a recipe.
+ """
+ config = textwrap.dedent('''\
+ MCTESTVAR = "test"
+ BBMULTICONFIG = "test"
+ ''')
+ self.write_config(config)
+
+ testconfig = textwrap.dedent('''\
+ MCTESTVAR_append = "1"
+ ''')
+ self.write_config(testconfig, 'test')
+
+ # Check that 1) the task executed and 2) that it output the correct
+ # value. Note "bitbake -e" is not used because it always reparses the
+ # recipe and we want to ensure that the automatic reparsing and parse
+ # caching is detected.
+ result = bitbake('mc:test:multiconfig-test-parse -c showvar')
+ self.assertIn('MCTESTVAR=test1', result.output.splitlines())
+
+ testconfig = textwrap.dedent('''\
+ MCTESTVAR_append = "2"
+ ''')
+ self.write_config(testconfig, 'test')
+
+ result = bitbake('mc:test:multiconfig-test-parse -c showvar')
+ self.assertIn('MCTESTVAR=test2', result.output.splitlines())
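
The rewritten tests above assume that write_config() accepts an optional second argument naming a multiconfig, writing to conf/multiconfig/<name>.conf instead of the hand-rolled ftools calls that were removed. A hedged approximation of that helper is sketched below; the real method lives in meta/lib/oeqa/selftest/case.py and its exact paths and behaviour may differ.

# Sketch of the presumed behaviour of the optional multiconfig argument; this
# is an approximation, not the actual OESelftestTestCase.write_config() code.
import os

def write_config(builddir, data, multiconfig=None):
    if multiconfig:
        # e.g. conf/multiconfig/musl.conf for write_config(muslconfig, 'musl')
        confdir = os.path.join(builddir, 'conf', 'multiconfig')
        os.makedirs(confdir, exist_ok=True)
        path = os.path.join(confdir, '%s.conf' % multiconfig)
    else:
        # otherwise appended to the selftest-managed configuration include
        path = os.path.join(builddir, 'conf', 'selftest.inc')
    with open(path, 'w') as f:
        f.write(data)
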
diff --git a/meta/lib/oeqa/selftest/cases/oescripts.py b/meta/lib/oeqa/selftest/cases/oescripts.py
index 7770b66a26..80d8b2c4cc 100644
--- a/meta/lib/oeqa/selftest/cases/oescripts.py
+++ b/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -3,10 +3,12 @@
#
import os
+import shutil
import unittest
from oeqa.selftest.case import OESelftestTestCase
from oeqa.selftest.cases.buildhistory import BuildhistoryBase
from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.utils import CommandError
class BuildhistoryDiffTests(BuildhistoryBase):
@@ -63,3 +65,65 @@ class OEPybootchartguyTests(OEScriptTests):
runCmd('%s/pybootchartgui/pybootchartgui.py %s -o %s/charts -f pdf' % (self.scripts_dir, self.buildstats, self.tmpdir))
self.assertTrue(os.path.exists(self.tmpdir + "/charts.pdf"))
+class OEGitproxyTests(OESelftestTestCase):
+
+ scripts_dir = os.path.join(get_bb_var('COREBASE'), 'scripts')
+
+ def test_oegitproxy_help(self):
+ try:
+ res = runCmd('%s/oe-git-proxy --help' % self.scripts_dir, assert_error=False)
+ self.assertTrue(False)
+ except CommandError as e:
+ self.assertEqual(2, e.retcode)
+
+ def run_oegitproxy(self, custom_shell=None):
+ os.environ['SOCAT'] = shutil.which("echo")
+ os.environ['ALL_PROXY'] = "https://proxy.example.com:3128"
+ os.environ['NO_PROXY'] = "*.example.com,.no-proxy.org,192.168.42.0/24,127.*.*.*"
+
+ if custom_shell is None:
+ prefix = ''
+ else:
+ prefix = custom_shell + ' '
+
+ # outside, use the proxy
+ res = runCmd('%s%s/oe-git-proxy host.outside-example.com 9418' %
+ (prefix,self.scripts_dir))
+ self.assertIn('PROXY:', res.output)
+ # match with wildcard suffix
+ res = runCmd('%s%s/oe-git-proxy host.example.com 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match just suffix
+ res = runCmd('%s%s/oe-git-proxy host.no-proxy.org 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match IP subnet
+ res = runCmd('%s%s/oe-git-proxy 192.168.42.42 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+ # match IP wildcard
+ res = runCmd('%s%s/oe-git-proxy 127.1.2.3 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+
+ # test that * globbing is off
+ os.environ['NO_PROXY'] = "*"
+ res = runCmd('%s%s/oe-git-proxy host.example.com 9418' %
+ (prefix, self.scripts_dir))
+ self.assertIn('TCP:', res.output)
+
+ def test_oegitproxy_proxy(self):
+ self.run_oegitproxy()
+
+ def test_oegitproxy_proxy_dash(self):
+ dash = shutil.which("dash")
+ if dash is None:
+ self.skipTest("No \"dash\" found on test system.")
+ self.run_oegitproxy(custom_shell=dash)
+
+class OeRunNativeTest(OESelftestTestCase):
+ def test_oe_run_native(self):
+ bitbake("qemu-helper-native -c addto_recipe_sysroot")
+ result = runCmd("oe-run-native qemu-helper-native tunctl -h")
+ self.assertIn("Delete: tunctl -d device-name [-f tun-clone-device]", result.output)
diff --git a/meta/lib/oeqa/selftest/cases/recipetool.py b/meta/lib/oeqa/selftest/cases/recipetool.py
index 1c701a40bf..c1562c63b2 100644
--- a/meta/lib/oeqa/selftest/cases/recipetool.py
+++ b/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -685,7 +685,9 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
self._test_appendsrcfile(testrecipe, filepath, srcdir=subdir)
bitbake('%s:do_unpack' % testrecipe)
- self.assertEqual(open(self.testfile, 'r').read(), open(os.path.join(srcdir, filepath), 'r').read())
+ with open(self.testfile, 'r') as testfile:
+ with open(os.path.join(srcdir, filepath), 'r') as makefilein:
+ self.assertEqual(testfile.read(), makefilein.read())
def test_recipetool_appendsrcfiles_basic(self, destdir=None):
newfiles = [self.testfile]
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index eee09d3fb2..a9110565a9 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -5,11 +5,16 @@
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+import bb.utils
import functools
import multiprocessing
import textwrap
import json
import unittest
+import tempfile
+import shutil
+import stat
+import os
MISSING = 'MISSING'
DIFFERENT = 'DIFFERENT'
@@ -72,8 +77,9 @@ def compare_file(reference, test, diffutils_sysroot):
return result
class ReproducibleTests(OESelftestTestCase):
- package_classes = ['deb']
+ package_classes = ['deb', 'ipk']
images = ['core-image-minimal']
+ save_results = False
def setUpLocal(self):
super().setUpLocal()
@@ -117,9 +123,18 @@ class ReproducibleTests(OESelftestTestCase):
self.extrasresults['reproducible']['files'].setdefault(package_class, {})[name] = [
{'reference': p.reference, 'test': p.test} for p in packages]
+ def copy_file(self, source, dest):
+ bb.utils.mkdirhier(os.path.dirname(dest))
+ shutil.copyfile(source, dest)
+
def test_reproducible_builds(self):
capture_vars = ['DEPLOY_DIR_' + c.upper() for c in self.package_classes]
+ if self.save_results:
+ save_dir = tempfile.mkdtemp(prefix='oe-reproducible-')
+ os.chmod(save_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ self.logger.info('Non-reproducible packages will be copied to %s', save_dir)
+
# Build native utilities
self.write_config('')
bitbake("diffutils-native -c addto_recipe_sysroot")
@@ -176,6 +191,11 @@ class ReproducibleTests(OESelftestTestCase):
self.write_package_list(package_class, 'different', result.different)
self.write_package_list(package_class, 'same', result.same)
+ if self.save_results:
+ for d in result.different:
+ self.copy_file(d.reference, '/'.join([save_dir, d.reference]))
+ self.copy_file(d.test, '/'.join([save_dir, d.test]))
+
if result.missing or result.different:
self.fail("The following %s packages are missing or different: %s" %
(c, ' '.join(r.test for r in (result.missing + result.different))))
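
The new save_results attribute defaults to False; a derived test (or a local edit) can enable it to keep the non-reproducible packages around for later inspection, for example with diffoscope. A hypothetical example of such a subclass, not part of this patch:

# Hypothetical local subclass for debugging reproducibility failures.
from oeqa.selftest.cases.reproducible import ReproducibleTests

class DebugReproducibleTests(ReproducibleTests):
    save_results = True          # copy differing packages into a tempdir (oe-reproducible-*)
    package_classes = ['ipk']    # optionally narrow the run while debugging
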
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index 20969d2c48..7d3922ce44 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -179,6 +179,8 @@ class TestImage(OESelftestTestCase):
distro = oe.lsb.distro_identifier()
if distro and distro == 'debian-8':
self.skipTest('virgl isn\'t working with Debian 8')
+ if distro and distro == 'centos-7':
+ self.skipTest('virgl isn\'t working with Centos 7')
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
features = 'INHERIT += "testimage"\n'
@@ -191,7 +193,7 @@ class TestImage(OESelftestTestCase):
features += 'TEST_SUITES = "ping ssh virgl"\n'
features += 'IMAGE_FEATURES_append = " ssh-server-dropbear"\n'
features += 'IMAGE_INSTALL_append = " kmscube"\n'
- features += 'TEST_RUNQEMUPARAMS = "gtk-gl"\n'
+ features += 'TEST_RUNQEMUPARAMS = "gtk gl"\n'
self.write_config(features)
bitbake('core-image-minimal')
bitbake('-c testimage core-image-minimal')
diff --git a/meta/lib/oeqa/selftest/cases/signing.py b/meta/lib/oeqa/selftest/cases/signing.py
index b390f37d8e..93b15ae681 100644
--- a/meta/lib/oeqa/selftest/cases/signing.py
+++ b/meta/lib/oeqa/selftest/cases/signing.py
@@ -3,7 +3,7 @@
#
from oeqa.selftest.case import OESelftestTestCase
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, create_temp_layer
import os
import oe
import glob
@@ -180,11 +180,11 @@ class LockedSignatures(OESelftestTestCase):
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
+ import uuid
+
test_recipe = 'ed'
locked_sigs_file = 'locked-sigs.inc'
- self.add_command_to_tearDown('rm -f %s' % os.path.join(self.builddir, locked_sigs_file))
-
bitbake(test_recipe)
# Generate locked sigs include file
bitbake('-S none %s' % test_recipe)
@@ -196,21 +196,29 @@ class LockedSignatures(OESelftestTestCase):
# Build a locked recipe
bitbake(test_recipe)
+ templayerdir = tempfile.mkdtemp(prefix='signingqa')
+ create_temp_layer(templayerdir, 'selftestsigning')
+ runCmd('bitbake-layers add-layer %s' % templayerdir)
+
# Make a change that should cause the locked task signature to change
+ # Use uuid so hash equivalence server isn't triggered
recipe_append_file = test_recipe + '_' + get_bb_var('PV', test_recipe) + '.bbappend'
- recipe_append_path = os.path.join(self.testlayer_path, 'recipes-test', test_recipe, recipe_append_file)
- feature = 'SUMMARY += "test locked signature"\n'
+ recipe_append_path = os.path.join(templayerdir, 'recipes-test', test_recipe, recipe_append_file)
+ feature = 'SUMMARY_${PN} = "test locked signature%s"\n' % uuid.uuid4()
- os.mkdir(os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
+ os.mkdir(os.path.join(templayerdir, 'recipes-test'))
+ os.mkdir(os.path.join(templayerdir, 'recipes-test', test_recipe))
write_file(recipe_append_path, feature)
- self.add_command_to_tearDown('rm -rf %s' % os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % templayerdir)
+ self.add_command_to_tearDown('rm -f %s' % os.path.join(self.builddir, locked_sigs_file))
+ self.add_command_to_tearDown('rm -rf %s' % templayerdir)
# Build the recipe again
ret = bitbake(test_recipe)
# Verify you get the warning and that the real task *isn't* run (i.e. the locked signature has worked)
- patt = r'WARNING: The %s:do_package sig is computed to be \S+, but the sig is locked to \S+ in SIGGEN_LOCKEDSIGS\S+' % test_recipe
+ patt = r'The %s:do_package sig is computed to be \S+, but the sig is locked to \S+ in SIGGEN_LOCKEDSIGS\S+' % test_recipe
found_warn = re.search(patt, ret.output)
self.assertIsNotNone(found_warn, "Didn't find the expected warning message. Output: %s" % ret.output)
diff --git a/meta/lib/oeqa/selftest/cases/wic.py b/meta/lib/oeqa/selftest/cases/wic.py
index fcdd55059d..ea75300406 100644
--- a/meta/lib/oeqa/selftest/cases/wic.py
+++ b/meta/lib/oeqa/selftest/cases/wic.py
@@ -632,8 +632,10 @@ class Wic2(WicTestCase):
# 1:0.00MiB:200MiB:200MiB:ext4::;\n
partlns = res.output.splitlines()[2:]
- self.assertEqual(1, len(partlns))
- self.assertEqual("1:0.00MiB:200MiB:200MiB:ext4::;", partlns[0])
+ self.assertEqual(1, len(partlns),
+ msg="Partition list '%s'" % res.output)
+ self.assertEqual("1:0.00MiB:200MiB:200MiB:ext4::;", partlns[0],
+ msg="Partition list '%s'" % res.output)
def test_fixed_size_error(self):
"""
diff --git a/meta/recipes-bsp/acpid/acpid.inc b/meta/recipes-bsp/acpid/acpid.inc
index 766ed4f89e..1e43e7a9db 100644
--- a/meta/recipes-bsp/acpid/acpid.inc
+++ b/meta/recipes-bsp/acpid/acpid.inc
@@ -1,4 +1,10 @@
SUMMARY = "A daemon for delivering ACPI events"
+DESCRIPTION = "ACPID is a completely flexible, totally extensible daemon for \
+delivering ACPI events. It listens on a netlink interface (or on the \
+deprecated file /proc/acpi/event), and when an event occurs, executes programs \
+to handle the event. The programs it executes are configured through a set of \
+configuration files, which can be dropped into place by packages or by the \
+admin."
HOMEPAGE = "http://sourceforge.net/projects/acpid2"
BUGTRACKER = "http://sourceforge.net/p/acpid2/tickets/?source=navbar"
SECTION = "base"
diff --git a/meta/recipes-bsp/formfactor/formfactor_0.0.bb b/meta/recipes-bsp/formfactor/formfactor_0.0.bb
index 53cf1cf774..ea1fa4c754 100644
--- a/meta/recipes-bsp/formfactor/formfactor_0.0.bb
+++ b/meta/recipes-bsp/formfactor/formfactor_0.0.bb
@@ -1,4 +1,7 @@
SUMMARY = "Device formfactor information"
+DESCRIPTION = "A formfactor configuration file provides information about the \
+target hardware for which the image is being built and information that the \
+build system cannot obtain from other sources such as the kernel."
SECTION = "base"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
diff --git a/meta/recipes-bsp/gnu-efi/gnu-efi/gnu-efi-3.0.9-fix-clang-build.patch b/meta/recipes-bsp/gnu-efi/gnu-efi/gnu-efi-3.0.9-fix-clang-build.patch
new file mode 100644
index 0000000000..c6d660095e
--- /dev/null
+++ b/meta/recipes-bsp/gnu-efi/gnu-efi/gnu-efi-3.0.9-fix-clang-build.patch
@@ -0,0 +1,24 @@
+Fix building with CLANG-9.0.0
+
+Fixes
+clang-9: error: unknown argument: '-maccumulate-outgoing-args'
+
+Upstream-Status: Submitted [https://sourceforge.net/p/gnu-efi/patches/70/]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+--- a/Make.defaults
++++ b/Make.defaults
+@@ -110,10 +110,10 @@
+ || ( [ $(GCCVERSION) -eq "4" ] \
+ && [ $(GCCMINOR) -ge "7" ] ) ) \
+ && echo 1)
+- ifeq ($(GCCNEWENOUGH),1)
+- CPPFLAGS += -DGNU_EFI_USE_MS_ABI -maccumulate-outgoing-args --std=c11
+- else ifeq ($(USING_CLANG),clang)
++ ifeq ($(USING_CLANG),clang)
+ CPPFLAGS += -DGNU_EFI_USE_MS_ABI --std=c11
++ else ifeq ($(GCCNEWENOUGH),1)
++ CPPFLAGS += -DGNU_EFI_USE_MS_ABI -maccumulate-outgoing-args --std=c11
+ endif
+
+ CFLAGS += -mno-red-zone
diff --git a/meta/recipes-bsp/gnu-efi/gnu-efi/parallel-make-archives.patch b/meta/recipes-bsp/gnu-efi/gnu-efi/parallel-make-archives.patch
index a9806cfdf6..8a0138bbe5 100644
--- a/meta/recipes-bsp/gnu-efi/gnu-efi/parallel-make-archives.patch
+++ b/meta/recipes-bsp/gnu-efi/gnu-efi/parallel-make-archives.patch
@@ -19,25 +19,7 @@ Signed-off-by: Darren Hart <dvhart@linux.intel.com>
Signed-off-by: California Sullivan <california.l.sullivan@intel.com>
[Rebased for 3.0.8]
Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
----
- gnuefi/Makefile | 3 ++-
- lib/Makefile | 2 +-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-diff --git a/gnuefi/Makefile b/gnuefi/Makefile
-index 2a61699..89b560a 100644
---- a/gnuefi/Makefile
-+++ b/gnuefi/Makefile
-@@ -54,7 +54,8 @@ TARGETS = crt0-efi-$(ARCH).o libgnuefi.a
-
- all: $(TARGETS)
-
--libgnuefi.a: $(patsubst %,libgnuefi.a(%),$(OBJS))
-+libgnuefi.a: $(OBJS)
-+ $(AR) $(ARFLAGS) $@ $(OBJS)
-
- clean:
- rm -f $(TARGETS) *~ *.o $(OBJS)
diff --git a/lib/Makefile b/lib/Makefile
index 0e6410d..048751a 100644
--- a/lib/Makefile
diff --git a/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.9.bb b/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb
index f844435472..9954d7f57a 100644
--- a/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.9.bb
+++ b/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb
@@ -15,10 +15,11 @@ LIC_FILES_CHKSUM = "file://gnuefi/crt0-efi-arm.S;beginline=4;endline=16;md5=e582
SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
file://parallel-make-archives.patch \
file://lib-Makefile-fix-parallel-issue.patch \
+ file://gnu-efi-3.0.9-fix-clang-build.patch \
"
-SRC_URI[md5sum] = "32af17b917545a693e549af2439c4a99"
-SRC_URI[sha256sum] = "6715ea7eae1c7e4fc5041034bd3f107ec2911962ed284a081e491646b12277f0"
+SRC_URI[md5sum] = "21148bbcccec385a9bfdf5f678959577"
+SRC_URI[sha256sum] = "f28da792a2532e91e18e0101468811739a22cde9eee5eacfd0efb9bf3a61d6b9"
COMPATIBLE_HOST = "(x86_64.*|i.86.*|aarch64.*|arm.*)-linux"
COMPATIBLE_HOST_armv4 = 'null'
diff --git a/meta/recipes-bsp/grub/grub-bootconf_1.00.bb b/meta/recipes-bsp/grub/grub-bootconf_1.00.bb
index 750f8c808a..572580313b 100644
--- a/meta/recipes-bsp/grub/grub-bootconf_1.00.bb
+++ b/meta/recipes-bsp/grub/grub-bootconf_1.00.bb
@@ -1,11 +1,16 @@
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
SUMMARY = "Basic grub.cfg for use in EFI systems"
+DESCRIPTION = "Grub might require different configuration file for \
+different machines."
+HOMEPAGE = "https://www.gnu.org/software/grub/manual/grub/grub.html#Configuration"
RPROVIDES_${PN} += "virtual/grub-bootconf"
inherit grub-efi-cfg
+require conf/image-uefi.conf
+
S = "${WORKDIR}"
GRUB_CFG = "${S}/grub-bootconf"
@@ -20,10 +25,8 @@ python do_configure() {
do_configure[vardeps] += "APPEND ROOT"
do_install() {
- install -d ${D}/boot
- install -d ${D}/boot/EFI
- install -d ${D}/boot/EFI/BOOT
- install grub-bootconf ${D}/boot/EFI/BOOT/grub.cfg
+ install -d ${D}${EFI_FILES_PATH}
+ install grub-bootconf ${D}${EFI_FILES_PATH}/grub.cfg
}
-FILES_${PN} = "/boot/EFI/BOOT/grub.cfg"
+FILES_${PN} = "${EFI_FILES_PATH}/grub.cfg"
diff --git a/meta/recipes-bsp/grub/grub-efi_2.04.bb b/meta/recipes-bsp/grub/grub-efi_2.04.bb
index aaa198ea23..b9d6225d27 100644
--- a/meta/recipes-bsp/grub/grub-efi_2.04.bb
+++ b/meta/recipes-bsp/grub/grub-efi_2.04.bb
@@ -1,9 +1,11 @@
require grub2.inc
+require conf/image-uefi.conf
+
GRUBPLATFORM = "efi"
DEPENDS_append_class-target = " grub-efi-native"
-RDEPENDS_${PN}_class-target = "diffutils freetype grub-common virtual/grub-bootconf"
+RDEPENDS_${PN}_class-target = "grub-common virtual/grub-bootconf"
SRC_URI += " \
file://cfg \
@@ -18,18 +20,15 @@ python __anonymous () {
prefix = "" if d.getVar('EFI_PROVIDER') == "grub-efi" else "grub-efi-"
if target == "x86_64":
grubtarget = 'x86_64'
- grubimage = prefix + "bootx64.efi"
elif re.match('i.86', target):
grubtarget = 'i386'
- grubimage = prefix + "bootia32.efi"
elif re.match('aarch64', target):
grubtarget = 'arm64'
- grubimage = prefix + "bootaa64.efi"
elif re.match('arm', target):
grubtarget = 'arm'
- grubimage = prefix + "bootarm.efi"
else:
raise bb.parse.SkipRecipe("grub-efi is incompatible with target %s" % target)
+ grubimage = prefix + d.getVar("EFI_BOOT_IMAGE")
d.setVar("GRUB_TARGET", grubtarget)
d.setVar("GRUB_IMAGE", grubimage)
prefix = "grub-efi-" if prefix == "" else ""
@@ -45,7 +44,7 @@ do_mkimage() {
cd ${B}
# Search for the grub.cfg on the local boot media by using the
# built in cfg file provided via this recipe
- grub-mkimage -c ../cfg -p /EFI/BOOT -d ./grub-core/ \
+ grub-mkimage -c ../cfg -p ${EFIDIR} -d ./grub-core/ \
-O ${GRUB_TARGET}-efi -o ./${GRUB_IMAGE_PREFIX}${GRUB_IMAGE} \
${GRUB_BUILDIN}
}
@@ -57,10 +56,8 @@ do_mkimage_class-native() {
}
do_install_append_class-target() {
- install -d ${D}/boot
- install -d ${D}/boot/EFI
- install -d ${D}/boot/EFI/BOOT
- install -m 644 ${B}/${GRUB_IMAGE_PREFIX}${GRUB_IMAGE} ${D}/boot/EFI/BOOT/${GRUB_IMAGE}
+ install -d ${D}${EFI_FILES_PATH}
+ install -m 644 ${B}/${GRUB_IMAGE_PREFIX}${GRUB_IMAGE} ${D}${EFI_FILES_PATH}/${GRUB_IMAGE}
}
do_install_class-native() {
@@ -100,7 +97,7 @@ addtask deploy after do_install before do_build
FILES_${PN} = "${libdir}/grub/${GRUB_TARGET}-efi \
${datadir}/grub \
- /boot/EFI/BOOT/${GRUB_IMAGE} \
+ ${EFI_FILES_PATH}/${GRUB_IMAGE} \
"
FILES_${PN}_remove_aarch64 = "${libdir}/grub/${GRUB_TARGET}-efi"
diff --git a/meta/recipes-bsp/grub/grub_2.04.bb b/meta/recipes-bsp/grub/grub_2.04.bb
index 1d1a45670c..9232ea8120 100644
--- a/meta/recipes-bsp/grub/grub_2.04.bb
+++ b/meta/recipes-bsp/grub/grub_2.04.bb
@@ -1,7 +1,7 @@
require grub2.inc
RDEPENDS_${PN}-common += "${PN}-editenv"
-RDEPENDS_${PN} += "diffutils freetype ${PN}-common"
+RDEPENDS_${PN} += "${PN}-common"
RPROVIDES_${PN}-editenv += "${PN}-efi-editenv"
diff --git a/meta/recipes-bsp/opensbi/opensbi_0.4.bb b/meta/recipes-bsp/opensbi/opensbi_0.5.bb
index b030436688..759bbbfdaa 100644
--- a/meta/recipes-bsp/opensbi/opensbi_0.4.bb
+++ b/meta/recipes-bsp/opensbi/opensbi_0.5.bb
@@ -8,7 +8,7 @@ require opensbi-payloads.inc
inherit autotools-brokensep deploy
-SRCREV = "ce228ee0919deb9957192d723eecc8aaae2697c6"
+SRCREV = "be92da280d87c38a2e0adc5d3f43bab7b5468f09"
SRC_URI = "git://github.com/riscv/opensbi.git \
file://0001-Makefile-Don-t-specify-mabi-or-march.patch \
"
diff --git a/meta/recipes-bsp/u-boot/files/0001-include-env.h-Ensure-ulong-is-defined.patch b/meta/recipes-bsp/u-boot/files/0001-include-env.h-Ensure-ulong-is-defined.patch
new file mode 100644
index 0000000000..b9118164df
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/0001-include-env.h-Ensure-ulong-is-defined.patch
@@ -0,0 +1,31 @@
+From 0565a080d153d5baaaacfeb5045a832e126f4f9e Mon Sep 17 00:00:00 2001
+From: Alistair Francis <alistair.francis@wdc.com>
+Date: Mon, 14 Oct 2019 17:37:30 -0700
+Subject: [PATCH] include/env.h: Ensure ulong is defined
+
+To fix these failures when building with musl:
+ include/env.h:166:1: error: unknown type name 'ulong'; did you mean 'long'?
+ensure that ulong is defined.
+
+Upstream-Status: Pending
+Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ include/env.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/env.h b/include/env.h
+index b72239f6a5..5ca49a3456 100644
+--- a/include/env.h
++++ b/include/env.h
+@@ -13,6 +13,8 @@
+ #include <stdbool.h>
+ #include <linux/types.h>
+
++typedef unsigned long ulong;
++
+ struct environment_s;
+
+ /* Value for environment validity */
+--
+2.23.0
+
diff --git a/meta/recipes-bsp/u-boot/u-boot-common.inc b/meta/recipes-bsp/u-boot/u-boot-common.inc
index ad043dbcde..c3e458e925 100644
--- a/meta/recipes-bsp/u-boot/u-boot-common.inc
+++ b/meta/recipes-bsp/u-boot/u-boot-common.inc
@@ -1,4 +1,8 @@
HOMEPAGE = "http://www.denx.de/wiki/U-Boot/WebHome"
+DESCRIPTION = "U-Boot, a boot loader for Embedded boards based on PowerPC, \
+ARM, MIPS and several other processors, which can be installed in a boot \
+ROM and used to initialize and test the hardware or to download and run \
+application code."
SECTION = "bootloaders"
DEPENDS += "flex-native bison-native"
@@ -8,8 +12,9 @@ PE = "1"
# We use the revision in order to avoid having to fetch it from the
# repo during parse
-SRCREV = "e5aee22e4be75e75a854ab64503fc80598bc2004"
+SRCREV = "61ba1244b548463dbfb3c5285b6b22e7c772c5bd"
-SRC_URI = "git://git.denx.de/u-boot.git"
+SRC_URI = "git://git.denx.de/u-boot.git \
+ "
S = "${WORKDIR}/git"
diff --git a/meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.07.bb b/meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.10.bb
index b5ce56847b..04321b7b66 100644
--- a/meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.07.bb
+++ b/meta/recipes-bsp/u-boot/u-boot-fw-utils_2019.10.bb
@@ -3,6 +3,8 @@ require u-boot-common.inc
SUMMARY = "U-Boot bootloader fw_printenv/setenv utilities"
DEPENDS += "mtd-utils"
+SRC_URI += "file://0001-include-env.h-Ensure-ulong-is-defined.patch"
+
INSANE_SKIP_${PN} = "already-stripped"
EXTRA_OEMAKE_class-target = 'CROSS_COMPILE=${TARGET_PREFIX} CC="${CC} ${CFLAGS} ${LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" V=1'
EXTRA_OEMAKE_class-cross = 'HOSTCC="${CC} ${CFLAGS} ${LDFLAGS}" V=1'
diff --git a/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb b/meta/recipes-bsp/u-boot/u-boot-tools_2019.10.bb
index bede984ef7..bede984ef7 100644
--- a/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb
+++ b/meta/recipes-bsp/u-boot/u-boot-tools_2019.10.bb
diff --git a/meta/recipes-bsp/u-boot/u-boot_2019.07.bb b/meta/recipes-bsp/u-boot/u-boot_2019.10.bb
index 02d67c0db2..02d67c0db2 100644
--- a/meta/recipes-bsp/u-boot/u-boot_2019.07.bb
+++ b/meta/recipes-bsp/u-boot/u-boot_2019.10.bb
diff --git a/meta/recipes-bsp/usbinit/usbinit.bb b/meta/recipes-bsp/usbinit/usbinit.bb
index aba44b4068..ef98f0bf62 100644
--- a/meta/recipes-bsp/usbinit/usbinit.bb
+++ b/meta/recipes-bsp/usbinit/usbinit.bb
@@ -1,4 +1,8 @@
SUMMARY = "Initscript for enabling USB gadget Ethernet"
+DESCRIPTION = "This module allows ethernet emulation over USB, allowing for \
+all sorts of nifty things like SSH and NFS in one go plus charging over the \
+same wire, at higher speeds than most Wifi connections."
+HOMEPAGE = "http://linux-sunxi.org/USB_Gadget/Ethernet"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://${WORKDIR}/COPYING.GPL;md5=751419260aa954499f7abaabaa882bbe"
diff --git a/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
new file mode 100644
index 0000000000..2fed99e1bb
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
@@ -0,0 +1,64 @@
+Backport patch to fix CVE-2019-6471.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2019-6471
+
+CVE: CVE-2019-6471
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/3a9c7bb]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 3a9c7bb80d4a609b86427406d9dd783199920b5b Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Tue, 19 Mar 2019 14:14:21 +1100
+Subject: [PATCH] move item_out test inside lock in dns_dispatch_getnext()
+
+(cherry picked from commit 60c42f849d520564ed42e5ed0ba46b4b69c07712)
+---
+ lib/dns/dispatch.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c
+index 408beda367..3278db4a07 100644
+--- a/lib/dns/dispatch.c
++++ b/lib/dns/dispatch.c
+@@ -134,7 +134,7 @@ struct dns_dispentry {
+ isc_task_t *task;
+ isc_taskaction_t action;
+ void *arg;
+- bool item_out;
++ bool item_out;
+ dispsocket_t *dispsocket;
+ ISC_LIST(dns_dispatchevent_t) items;
+ ISC_LINK(dns_dispentry_t) link;
+@@ -3422,13 +3422,14 @@ dns_dispatch_getnext(dns_dispentry_t *resp, dns_dispatchevent_t **sockevent) {
+ disp = resp->disp;
+ REQUIRE(VALID_DISPATCH(disp));
+
+- REQUIRE(resp->item_out == true);
+- resp->item_out = false;
+-
+ ev = *sockevent;
+ *sockevent = NULL;
+
+ LOCK(&disp->lock);
++
++ REQUIRE(resp->item_out == true);
++ resp->item_out = false;
++
+ if (ev->buffer.base != NULL)
+ free_buffer(disp, ev->buffer.base, ev->buffer.length);
+ free_devent(disp, ev);
+@@ -3573,6 +3574,9 @@ dns_dispatch_removeresponse(dns_dispentry_t **resp,
+ isc_task_send(disp->task[0], &disp->ctlevent);
+ }
+
++/*
++ * disp must be locked.
++ */
+ static void
+ do_cancel(dns_dispatch_t *disp) {
+ dns_dispatchevent_t *ev;
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
new file mode 100644
index 0000000000..48ae125f84
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
@@ -0,0 +1,60 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ec2d50d]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From ec2d50da8d81814640e28593d912f4b96c7efece Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
+Date: Thu, 3 Jan 2019 14:17:43 +0100
+Subject: [PATCH 1/6] fix enforcement of tcp-clients (v1)
+
+tcp-clients settings could be exceeded in some cases by
+creating more and more active TCP clients that are over
+the set quota limit, which in the end could lead to a
+DoS attack by e.g. exhaustion of file descriptors.
+
+If TCP client we're closing went over the quota (so it's
+not attached to a quota) mark it as mortal - so that it
+will be destroyed and not set up to listen for new
+connections - unless it's the last client for a specific
+interface.
+
+(cherry picked from commit f97131d21b97381cef72b971b157345c1f9b4115)
+(cherry picked from commit 9689ffc485df8f971f0ad81ab8ab1f5389493776)
+---
+ bin/named/client.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index d482da7121..0739dd48af 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -421,8 +421,19 @@ exit_check(ns_client_t *client) {
+ isc_socket_detach(&client->tcpsocket);
+ }
+
+- if (client->tcpquota != NULL)
++ if (client->tcpquota != NULL) {
+ isc_quota_detach(&client->tcpquota);
++ } else {
++ /*
++ * We went over quota with this client, we don't
++ * want to restart listening unless this is the
++ * last client on this interface, which is
++ * checked later.
++ */
++ if (TCP_CLIENT(client)) {
++ client->mortal = true;
++ }
++ }
+
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
new file mode 100644
index 0000000000..ca4e8b1a66
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
@@ -0,0 +1,670 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
+Date: Fri, 4 Jan 2019 12:50:51 +0100
+Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2)
+
+the TCP client quota could still be ineffective under some
+circumstances. this change:
+
+- improves quota accounting to ensure that TCP clients are
+ properly limited, while still guaranteeing that at least one client
+ is always available to serve TCP connections on each interface.
+- uses more descriptive names and removes one (ntcptarget) that
+ was no longer needed
+- adds comments
+
+(cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2)
+(cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396)
+---
+ bin/named/client.c | 311 ++++++++++++++++++++-----
+ bin/named/include/named/client.h | 14 +-
+ bin/named/include/named/interfacemgr.h | 11 +-
+ bin/named/interfacemgr.c | 8 +-
+ 4 files changed, 267 insertions(+), 77 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 0739dd48af..a7b49a0f71 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ dns_dispatch_t *disp, bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- isc_socket_t *sock);
++ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+-allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr,
+- uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl);
++allowed(isc_netaddr_t *addr, dns_name_t *signer,
++ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
++ uint8_t *ecs_scope, dns_acl_t *acl);
+ static void compute_cookie(ns_client_t *client, uint32_t when,
+ uint32_t nonce, const unsigned char *secret,
+ isc_buffer_t *buf);
+@@ -405,8 +406,11 @@ exit_check(ns_client_t *client) {
+ */
+ INSIST(client->recursionquota == NULL);
+ INSIST(client->newstate <= NS_CLIENTSTATE_READY);
+- if (client->nreads > 0)
++
++ if (client->nreads > 0) {
+ dns_tcpmsg_cancelread(&client->tcpmsg);
++ }
++
+ if (client->nreads != 0) {
+ /* Still waiting for read cancel completion. */
+ return (true);
+@@ -416,25 +420,58 @@ exit_check(ns_client_t *client) {
+ dns_tcpmsg_invalidate(&client->tcpmsg);
+ client->tcpmsg_valid = false;
+ }
++
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
++
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ INSIST(client->interface->ntcpactive > 0);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
++ }
+ }
+
+ if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
+- } else {
+ /*
+- * We went over quota with this client, we don't
+- * want to restart listening unless this is the
+- * last client on this interface, which is
+- * checked later.
++ * If we are not in a pipeline group, or
++ * we are the last client in the group, detach from
++ * tcpquota; otherwise, transfer the quota to
++ * another client in the same group.
+ */
+- if (TCP_CLIENT(client)) {
+- client->mortal = true;
++ if (!ISC_LINK_LINKED(client, glink) ||
++ (client->glink.next == NULL &&
++ client->glink.prev == NULL))
++ {
++ isc_quota_detach(&client->tcpquota);
++ } else if (client->glink.next != NULL) {
++ INSIST(client->glink.next->tcpquota == NULL);
++ client->glink.next->tcpquota = client->tcpquota;
++ client->tcpquota = NULL;
++ } else {
++ INSIST(client->glink.prev->tcpquota == NULL);
++ client->glink.prev->tcpquota = client->tcpquota;
++ client->tcpquota = NULL;
+ }
+ }
+
++ /*
++ * Unlink from pipeline group.
++ */
++ if (ISC_LINK_LINKED(client, glink)) {
++ if (client->glink.next != NULL) {
++ client->glink.next->glink.prev =
++ client->glink.prev;
++ }
++ if (client->glink.prev != NULL) {
++ client->glink.prev->glink.next =
++ client->glink.next;
++ }
++ ISC_LINK_INIT(client, glink);
++ }
++
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+ isc_timertype_inactive,
+@@ -455,15 +492,16 @@ exit_check(ns_client_t *client) {
+ * that already. Check whether this client needs to remain
+ * active and force it to go inactive if not.
+ *
+- * UDP clients go inactive at this point, but TCP clients
+- * may remain active if we have fewer active TCP client
+- * objects than desired due to an earlier quota exhaustion.
++ * UDP clients go inactive at this point, but a TCP client
++ * will need to remain active if no other clients are
++ * listening for TCP requests on this interface, to
++ * prevent this interface from going nonresponsive.
+ */
+ if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+ LOCK(&client->interface->lock);
+- if (client->interface->ntcpcurrent <
+- client->interface->ntcptarget)
++ if (client->interface->ntcpaccepting == 0) {
+ client->mortal = false;
++ }
+ UNLOCK(&client->interface->lock);
+ }
+
+@@ -472,15 +510,17 @@ exit_check(ns_client_t *client) {
+ * queue for recycling.
+ */
+ if (client->mortal) {
+- if (client->newstate > NS_CLIENTSTATE_INACTIVE)
++ if (client->newstate > NS_CLIENTSTATE_INACTIVE) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ }
+ }
+
+ if (NS_CLIENTSTATE_READY == client->newstate) {
+ if (TCP_CLIENT(client)) {
+ client_accept(client);
+- } else
++ } else {
+ client_udprecv(client);
++ }
+ client->newstate = NS_CLIENTSTATE_MAX;
+ return (true);
+ }
+@@ -492,41 +532,57 @@ exit_check(ns_client_t *client) {
+ /*
+ * We are trying to enter the inactive state.
+ */
+- if (client->naccepts > 0)
++ if (client->naccepts > 0) {
+ isc_socket_cancel(client->tcplistener, client->task,
+ ISC_SOCKCANCEL_ACCEPT);
++ }
+
+ /* Still waiting for accept cancel completion. */
+- if (! (client->naccepts == 0))
++ if (! (client->naccepts == 0)) {
+ return (true);
++ }
+
+ /* Accept cancel is complete. */
+- if (client->nrecvs > 0)
++ if (client->nrecvs > 0) {
+ isc_socket_cancel(client->udpsocket, client->task,
+ ISC_SOCKCANCEL_RECV);
++ }
+
+ /* Still waiting for recv cancel completion. */
+- if (! (client->nrecvs == 0))
++ if (! (client->nrecvs == 0)) {
+ return (true);
++ }
+
+ /* Still waiting for control event to be delivered */
+- if (client->nctls > 0)
++ if (client->nctls > 0) {
+ return (true);
+-
+- /* Deactivate the client. */
+- if (client->interface)
+- ns_interface_detach(&client->interface);
++ }
+
+ INSIST(client->naccepts == 0);
+ INSIST(client->recursionquota == NULL);
+- if (client->tcplistener != NULL)
++ if (client->tcplistener != NULL) {
+ isc_socket_detach(&client->tcplistener);
+
+- if (client->udpsocket != NULL)
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ INSIST(client->interface->ntcpactive > 0);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
++ }
++ }
++ if (client->udpsocket != NULL) {
+ isc_socket_detach(&client->udpsocket);
++ }
+
+- if (client->dispatch != NULL)
++ /* Deactivate the client. */
++ if (client->interface != NULL) {
++ ns_interface_detach(&client->interface);
++ }
++
++ if (client->dispatch != NULL) {
+ dns_dispatch_detach(&client->dispatch);
++ }
+
+ client->attributes = 0;
+ client->mortal = false;
+@@ -551,10 +607,13 @@ exit_check(ns_client_t *client) {
+ client->newstate = NS_CLIENTSTATE_MAX;
+ if (!ns_g_clienttest && manager != NULL &&
+ !manager->exiting)
++ {
+ ISC_QUEUE_PUSH(manager->inactive, client,
+ ilink);
+- if (client->needshutdown)
++ }
++ if (client->needshutdown) {
+ isc_task_shutdown(client->task);
++ }
+ return (true);
+ }
+ }
+@@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) {
+ }
+ }
+
+-
+ /*%
+ * The client's task has received a shutdown event.
+ */
+@@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ /*
+ * Pipeline TCP query processing.
+ */
+- if (client->message->opcode != dns_opcode_query)
++ if (client->message->opcode != dns_opcode_query) {
+ client->pipelined = false;
++ }
+ if (TCP_CLIENT(client) && client->pipelined) {
+- result = isc_quota_reserve(&ns_g_server->tcpquota);
+- if (result == ISC_R_SUCCESS)
+- result = ns_client_replace(client);
++ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
+- "no more TCP clients(read): %s",
+- isc_result_totext(result));
+ client->pipelined = false;
+ }
+ }
+@@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->filter_aaaa = dns_aaaa_ok;
+ #endif
+ client->needshutdown = ns_g_clienttest;
++ client->tcpactive = false;
+
+ ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL,
+ NS_EVENT_CLIENTCONTROL, client_start, client, client,
+@@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->formerrcache.id = 0;
+ ISC_LINK_INIT(client, link);
+ ISC_LINK_INIT(client, rlink);
++ ISC_LINK_INIT(client, glink);
+ ISC_QLINK_INIT(client, ilink);
+ client->keytag = NULL;
+ client->keytag_len = 0;
+@@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+
+ INSIST(client->state == NS_CLIENTSTATE_READY);
+
++ /*
++ * The accept() was successful and we're now establishing a new
++ * connection. We need to make note of it in the client and
++ * interface objects so client objects can do the right thing
++ * when going inactive in exit_check() (see comments in
++ * client_accept() for details).
++ */
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+ LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpcurrent > 0);
+- client->interface->ntcpcurrent--;
++ INSIST(client->interface->ntcpaccepting > 0);
++ client->interface->ntcpaccepting--;
+ UNLOCK(&client->interface->lock);
+
+ /*
+@@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
++ if (client->tcpquota != NULL) {
++ isc_quota_detach(&client->tcpquota);
++ }
+ }
+
+ if (exit_check(client))
+@@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ * deny service to legitimate TCP clients.
+ */
+ client->pipelined = false;
+- result = isc_quota_attach(&ns_g_server->tcpquota,
+- &client->tcpquota);
+- if (result == ISC_R_SUCCESS)
+- result = ns_client_replace(client);
+- if (result != ISC_R_SUCCESS) {
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
+- "no more TCP clients(accept): %s",
+- isc_result_totext(result));
+- } else if (ns_g_server->keepresporder == NULL ||
+- !allowed(&netaddr, NULL, NULL, 0, NULL,
+- ns_g_server->keepresporder)) {
++ result = ns_client_replace(client);
++ if (result == ISC_R_SUCCESS &&
++ (client->sctx->keepresporder == NULL ||
++ !allowed(&netaddr, NULL, NULL, 0, NULL,
++ ns_g_server->keepresporder)))
++ {
+ client->pipelined = true;
+ }
+
+@@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) {
+
+ CTRACE("accept");
+
++ /*
++ * The tcpquota object can only be simultaneously referenced a
++ * pre-defined number of times; this is configured by 'tcp-clients'
++ * in named.conf. If we can't attach to it here, that means the TCP
++ * client quota has been exceeded.
++ */
++ result = isc_quota_attach(&client->sctx->tcpquota,
++ &client->tcpquota);
++ if (result != ISC_R_SUCCESS) {
++ bool exit;
++
++ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
++ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
++ "no more TCP clients: %s",
++ isc_result_totext(result));
++
++ /*
++ * We have exceeded the system-wide TCP client
++ * quota. But, we can't just block this accept
++ * in all cases, because if we did, a heavy TCP
++ * load on other interfaces might cause this
++ * interface to be starved, with no clients able
++ * to accept new connections.
++ *
++ * So, we check here to see if any other client
++ * is already servicing TCP queries on this
++ * interface (whether accepting, reading, or
++ * processing).
++ *
++ * If so, then it's okay *not* to call
++ * accept - we can let this client go inactive
++ * and the other one handle the next connection
++ * when it's ready.
++ *
++ * But if not, then we need to be a little bit
++ * flexible about the quota. We allow *one* extra
++ * TCP client through, to ensure we're listening on
++ * every interface.
++ *
++ * (Note: In practice this means that the *real*
++ * TCP client quota is tcp-clients plus the number
++ * of interfaces.)
++ */
++ LOCK(&client->interface->lock);
++ exit = (client->interface->ntcpactive > 0);
++ UNLOCK(&client->interface->lock);
++
++ if (exit) {
++ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ (void)exit_check(client);
++ return;
++ }
++ }
++
++ /*
++ * By incrementing the interface's ntcpactive counter we signal
++ * that there is at least one client servicing TCP queries for the
++ * interface.
++ *
++ * We also make note of the fact in the client itself with the
++ * tcpactive flag. This ensures proper accounting by preventing
++ * us from accidentally incrementing or decrementing ntcpactive
++ * more than once per client object.
++ */
++ if (!client->tcpactive) {
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = true;
++ }
++
+ result = isc_socket_accept(client->tcplistener, client->task,
+ client_newconn, client);
+ if (result != ISC_R_SUCCESS) {
+- UNEXPECTED_ERROR(__FILE__, __LINE__,
+- "isc_socket_accept() failed: %s",
+- isc_result_totext(result));
+ /*
+ * XXXRTH What should we do? We're trying to accept but
+ * it didn't work. If we just give up, then TCP
+@@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) {
+ *
+ * For now, we just go idle.
+ */
++ UNEXPECTED_ERROR(__FILE__, __LINE__,
++ "isc_socket_accept() failed: %s",
++ isc_result_totext(result));
++ if (client->tcpquota != NULL) {
++ isc_quota_detach(&client->tcpquota);
++ }
+ return;
+ }
++
++ /*
++ * The client's 'naccepts' counter indicates that this client has
++ * called accept() and is waiting for a new connection. It should
++ * never exceed 1.
++ */
+ INSIST(client->naccepts == 0);
+ client->naccepts++;
++
++ /*
++ * The interface's 'ntcpaccepting' counter is incremented when
++ * any client calls accept(), and decremented in client_newconn()
++ * once the connection is established.
++ *
++ * When the client object is shutting down after handling a TCP
++ * request (see exit_check()), it looks to see whether this value is
++ * non-zero. If so, that means another client has already called
++ * accept() and is waiting to establish the next connection, which
++ * means the first client is free to go inactive. Otherwise,
++ * the first client must come back and call accept() again; this
++ * guarantees there will always be at least one client listening
++ * for new TCP connections on each interface.
++ */
+ LOCK(&client->interface->lock);
+- client->interface->ntcpcurrent++;
++ client->interface->ntcpaccepting++;
+ UNLOCK(&client->interface->lock);
+ }
+
+@@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) {
+ tcp = TCP_CLIENT(client);
+ if (tcp && client->pipelined) {
+ result = get_worker(client->manager, client->interface,
+- client->tcpsocket);
++ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+ client->dispatch, tcp);
+ }
+- if (result != ISC_R_SUCCESS)
++ if (result != ISC_R_SUCCESS) {
+ return (result);
++ }
+
+ /*
+ * The responsibility for listening for new requests is hereby
+@@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+ &client->tcplistener);
++
+ } else {
+ isc_socket_t *sock;
+
+@@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ }
+
+ static isc_result_t
+-get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
++get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
++ ns_client_t *oldclient)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ MTRACE("get worker");
+
+ REQUIRE(manager != NULL);
++ REQUIRE(oldclient != NULL);
+
+ if (manager->exiting)
+ return (ISC_R_SHUTTINGDOWN);
+@@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &ns_g_server->tcpquota;
++
++ /*
++ * Transfer TCP quota to the new client.
++ */
++ INSIST(client->tcpquota == NULL);
++ INSIST(oldclient->tcpquota != NULL);
++ client->tcpquota = oldclient->tcpquota;
++ oldclient->tcpquota = NULL;
++
++ /*
++ * Link to a pipeline group, creating it if needed.
++ */
++ if (!ISC_LINK_LINKED(oldclient, glink)) {
++ oldclient->glink.next = NULL;
++ oldclient->glink.prev = NULL;
++ }
++ client->glink.next = oldclient->glink.next;
++ client->glink.prev = oldclient;
++ if (oldclient->glink.next != NULL) {
++ oldclient->glink.next->glink.prev = client;
++ }
++ oldclient->glink.next = client;
+
+ client->dscp = ifp->dscp;
+
+@@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
+ client->peeraddr_valid = true;
+
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++
++ client->tcpactive = true;
++
+ INSIST(client->tcpmsg_valid == false);
+ dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
+ client->tcpmsg_valid = true;
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index b23a7b191d..1f7973f9c5 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -94,7 +94,8 @@ struct ns_client {
+ int nupdates;
+ int nctls;
+ int references;
+- bool needshutdown; /*
++ bool tcpactive;
++ bool needshutdown; /*
+ * Used by clienttest to get
+ * the client to go from
+ * inactive to free state
+@@ -130,9 +131,9 @@ struct ns_client {
+ isc_stdtime_t now;
+ isc_time_t tnow;
+ dns_name_t signername; /*%< [T]SIG key name */
+- dns_name_t * signer; /*%< NULL if not valid sig */
+- bool mortal; /*%< Die after handling request */
+- bool pipelined; /*%< TCP queries not in sequence */
++ dns_name_t *signer; /*%< NULL if not valid sig */
++ bool mortal; /*%< Die after handling request */
++ bool pipelined; /*%< TCP queries not in sequence */
+ isc_quota_t *tcpquota;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+@@ -143,8 +144,8 @@ struct ns_client {
+ isc_sockaddr_t destsockaddr;
+
+ isc_netaddr_t ecs_addr; /*%< EDNS client subnet */
+- uint8_t ecs_addrlen;
+- uint8_t ecs_scope;
++ uint8_t ecs_addrlen;
++ uint8_t ecs_scope;
+
+ struct in6_pktinfo pktinfo;
+ isc_dscp_t dscp;
+@@ -166,6 +167,7 @@ struct ns_client {
+
+ ISC_LINK(ns_client_t) link;
+ ISC_LINK(ns_client_t) rlink;
++ ISC_LINK(ns_client_t) glink;
+ ISC_QLINK(ns_client_t) ilink;
+ unsigned char cookie[8];
+ uint32_t expire;
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 7d1883e1e8..61b08826a6 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -77,9 +77,14 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int ntcptarget; /*%< Desired number of concurrent
+- TCP accepts */
+- int ntcpcurrent; /*%< Current ditto, locked */
++ int ntcpaccepting; /*%< Number of clients
++ ready to accept new
++ TCP connections on this
++ interface */
++ int ntcpactive; /*%< Number of clients
++ servicing TCP queries
++ (whether accepting or
++ connected) */
+ int nudpdispatch; /*%< Number of UDP dispatches */
+ ns_clientmgr_t * clientmgr; /*%< Client manager. */
+ ISC_LINK(ns_interface_t) link;
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index 419927bf54..955096ef47 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ * connections will be handled in parallel even though there is
+ * only one client initially.
+ */
+- ifp->ntcptarget = 1;
+- ifp->ntcpcurrent = 0;
++ ifp->ntcpaccepting = 0;
++ ifp->ntcpactive = 0;
+ ifp->nudpdispatch = 0;
+
+ ifp->dscp = -1;
+@@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) {
+ */
+ (void)isc_socket_filter(ifp->tcpsocket, "dataready");
+
+- result = ns_clientmgr_createclients(ifp->clientmgr,
+- ifp->ntcptarget, ifp,
+- true);
++ result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true);
+ if (result != ISC_R_SUCCESS) {
+ UNEXPECTED_ERROR(__FILE__, __LINE__,
+ "TCP ns_clientmgr_createclients(): %s",
+--
+2.20.1
+
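Context note (not part of the patch): the interfacemgr.h hunk above replaces the old ntcptarget/ntcpcurrent pair with two counters, ntcpaccepting (clients parked in accept()) and ntcpactive (clients servicing TCP at all), both updated under the interface lock. A rough standalone sketch of that accounting pattern — simplified, invented names, plain pthreads instead of ISC locks, not taken from BIND — might look like this:

/*
 * Minimal illustration of per-interface TCP accounting:
 * "accepting" counts clients waiting in accept(), "active" counts
 * every client bound to the interface's TCP socket.  Both are only
 * touched under the interface lock, mirroring the LOCK()/UNLOCK()
 * pairs in the hunks above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct iface {
	pthread_mutex_t lock;
	int ntcpaccepting;   /* clients waiting in accept()             */
	int ntcpactive;      /* clients accepting or serving a TCP conn */
};

static void client_starts_accepting(struct iface *ifp) {
	pthread_mutex_lock(&ifp->lock);
	ifp->ntcpaccepting++;
	pthread_mutex_unlock(&ifp->lock);
}

static void client_accept_completed(struct iface *ifp) {
	pthread_mutex_lock(&ifp->lock);
	ifp->ntcpaccepting--;
	ifp->ntcpactive++;          /* the connection is now being served */
	pthread_mutex_unlock(&ifp->lock);
}

static bool may_go_inactive(struct iface *ifp) {
	bool ok;
	pthread_mutex_lock(&ifp->lock);
	/* safe to retire only if someone else is still listening */
	ok = (ifp->ntcpaccepting > 0);
	pthread_mutex_unlock(&ifp->lock);
	return ok;
}

int main(void) {
	struct iface ifp = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	client_starts_accepting(&ifp);
	client_accept_completed(&ifp);
	bool retire = may_go_inactive(&ifp);
	printf("accepting=%d active=%d retire=%d\n",
	       ifp.ntcpaccepting, ifp.ntcpactive, retire);
	return 0;
}

The point of the split is visible in may_go_inactive(): a client is only free to retire when some other client is still parked in accept() for the same interface.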
diff --git a/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
new file mode 100644
index 0000000000..032cfb8c44
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
@@ -0,0 +1,278 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/366b4e1]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 366b4e1ede8aed690e981e07137cb1cb77879c36 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
+Date: Thu, 17 Jan 2019 15:53:38 +0100
+Subject: [PATCH 3/6] use reference counter for pipeline groups (v3)
+
+Track pipeline groups using a shared reference counter
+instead of a linked list.
+
+(cherry picked from commit 513afd33eb17d5dc41a3f0d2d38204ef8c5f6f91)
+(cherry picked from commit 9446629b730c59c4215f08d37fbaf810282fbccb)
+---
+ bin/named/client.c | 171 ++++++++++++++++++++-----------
+ bin/named/include/named/client.h | 2 +-
+ 2 files changed, 110 insertions(+), 63 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index a7b49a0f71..277656cef0 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -299,6 +299,75 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
+ }
+ }
+
++/*%
++ * Allocate a reference counter that will track the number of client structures
++ * using the TCP connection that 'client' called accept() for. This counter
++ * will be shared between all client structures associated with this TCP
++ * connection.
++ */
++static void
++pipeline_init(ns_client_t *client) {
++ isc_refcount_t *refs;
++
++ REQUIRE(client->pipeline_refs == NULL);
++
++ /*
++ * A global memory context is used for the allocation as different
++ * client structures may have different memory contexts assigned and a
++ * reference counter allocated here might need to be freed by a
++ * different client. The performance impact caused by memory context
++ * contention here is expected to be negligible, given that this code
++ * is only executed for TCP connections.
++ */
++ refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
++ isc_refcount_init(refs, 1);
++ client->pipeline_refs = refs;
++}
++
++/*%
++ * Increase the count of client structures using the TCP connection that
++ * 'source' is associated with and put a pointer to that count in 'target',
++ * thus associating it with the same TCP connection.
++ */
++static void
++pipeline_attach(ns_client_t *source, ns_client_t *target) {
++ int old_refs;
++
++ REQUIRE(source->pipeline_refs != NULL);
++ REQUIRE(target->pipeline_refs == NULL);
++
++ old_refs = isc_refcount_increment(source->pipeline_refs);
++ INSIST(old_refs > 0);
++ target->pipeline_refs = source->pipeline_refs;
++}
++
++/*%
++ * Decrease the count of client structures using the TCP connection that
++ * 'client' is associated with. If this is the last client using this TCP
++ * connection, free the reference counter and return true; otherwise, return
++ * false.
++ */
++static bool
++pipeline_detach(ns_client_t *client) {
++ isc_refcount_t *refs;
++ int old_refs;
++
++ REQUIRE(client->pipeline_refs != NULL);
++
++ refs = client->pipeline_refs;
++ client->pipeline_refs = NULL;
++
++ old_refs = isc_refcount_decrement(refs);
++ INSIST(old_refs > 0);
++
++ if (old_refs == 1) {
++ isc_mem_free(client->sctx->mctx, refs);
++ return (true);
++ }
++
++ return (false);
++}
++
+ /*%
+ * Check for a deactivation or shutdown request and take appropriate
+ * action. Returns true if either is in progress; in this case
+@@ -421,6 +490,40 @@ exit_check(ns_client_t *client) {
+ client->tcpmsg_valid = false;
+ }
+
++ if (client->tcpquota != NULL) {
++ if (client->pipeline_refs == NULL ||
++ pipeline_detach(client))
++ {
++ /*
++ * Only detach from the TCP client quota if
++ * there are no more client structures using
++ * this TCP connection.
++ *
++ * Note that we check 'pipeline_refs' and not
++ * 'pipelined' because in some cases (e.g.
++ * after receiving a request with an opcode
++ * different than QUERY) 'pipelined' is set to
++ * false after the reference counter gets
++ * allocated in pipeline_init() and we must
++ * still drop our reference as failing to do so
++ * would prevent the reference counter itself
++ * from being freed.
++ */
++ isc_quota_detach(&client->tcpquota);
++ } else {
++ /*
++ * There are other client structures using this
++ * TCP connection, so we cannot detach from the
++ * TCP client quota to prevent excess TCP
++ * connections from being accepted. However,
++ * this client structure might later be reused
++ * for accepting new connections and thus must
++ * have its 'tcpquota' field set to NULL.
++ */
++ client->tcpquota = NULL;
++ }
++ }
++
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
+@@ -434,44 +537,6 @@ exit_check(ns_client_t *client) {
+ }
+ }
+
+- if (client->tcpquota != NULL) {
+- /*
+- * If we are not in a pipeline group, or
+- * we are the last client in the group, detach from
+- * tcpquota; otherwise, transfer the quota to
+- * another client in the same group.
+- */
+- if (!ISC_LINK_LINKED(client, glink) ||
+- (client->glink.next == NULL &&
+- client->glink.prev == NULL))
+- {
+- isc_quota_detach(&client->tcpquota);
+- } else if (client->glink.next != NULL) {
+- INSIST(client->glink.next->tcpquota == NULL);
+- client->glink.next->tcpquota = client->tcpquota;
+- client->tcpquota = NULL;
+- } else {
+- INSIST(client->glink.prev->tcpquota == NULL);
+- client->glink.prev->tcpquota = client->tcpquota;
+- client->tcpquota = NULL;
+- }
+- }
+-
+- /*
+- * Unlink from pipeline group.
+- */
+- if (ISC_LINK_LINKED(client, glink)) {
+- if (client->glink.next != NULL) {
+- client->glink.next->glink.prev =
+- client->glink.prev;
+- }
+- if (client->glink.prev != NULL) {
+- client->glink.prev->glink.next =
+- client->glink.next;
+- }
+- ISC_LINK_INIT(client, glink);
+- }
+-
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+ isc_timertype_inactive,
+@@ -3130,6 +3195,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ dns_name_init(&client->signername, NULL);
+ client->mortal = false;
+ client->pipelined = false;
++ client->pipeline_refs = NULL;
+ client->tcpquota = NULL;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+@@ -3154,7 +3220,6 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->formerrcache.id = 0;
+ ISC_LINK_INIT(client, link);
+ ISC_LINK_INIT(client, rlink);
+- ISC_LINK_INIT(client, glink);
+ ISC_QLINK_INIT(client, ilink);
+ client->keytag = NULL;
+ client->keytag_len = 0;
+@@ -3341,6 +3406,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
++ pipeline_init(client);
+ client->pipelined = true;
+ }
+
+@@ -3800,35 +3866,16 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+-
+- /*
+- * Transfer TCP quota to the new client.
+- */
+- INSIST(client->tcpquota == NULL);
+- INSIST(oldclient->tcpquota != NULL);
+- client->tcpquota = oldclient->tcpquota;
+- oldclient->tcpquota = NULL;
+-
+- /*
+- * Link to a pipeline group, creating it if needed.
+- */
+- if (!ISC_LINK_LINKED(oldclient, glink)) {
+- oldclient->glink.next = NULL;
+- oldclient->glink.prev = NULL;
+- }
+- client->glink.next = oldclient->glink.next;
+- client->glink.prev = oldclient;
+- if (oldclient->glink.next != NULL) {
+- oldclient->glink.next->glink.prev = client;
+- }
+- oldclient->glink.next = client;
++ client->tcpquota = &client->sctx->tcpquota;
+
+ client->dscp = ifp->dscp;
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+- client->pipelined = true;
+ client->mortal = true;
+
++ pipeline_attach(oldclient, client);
++ client->pipelined = true;
++
+ isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
+ isc_socket_attach(sock, &client->tcpsocket);
+ isc_socket_setname(client->tcpsocket, "worker-tcp", NULL);
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index 1f7973f9c5..aeed9ccdda 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -134,6 +134,7 @@ struct ns_client {
+ dns_name_t *signer; /*%< NULL if not valid sig */
+ bool mortal; /*%< Die after handling request */
+ bool pipelined; /*%< TCP queries not in sequence */
++ isc_refcount_t *pipeline_refs;
+ isc_quota_t *tcpquota;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+@@ -167,7 +168,6 @@ struct ns_client {
+
+ ISC_LINK(ns_client_t) link;
+ ISC_LINK(ns_client_t) rlink;
+- ISC_LINK(ns_client_t) glink;
+ ISC_QLINK(ns_client_t) ilink;
+ unsigned char cookie[8];
+ uint32_t expire;
+--
+2.20.1
+
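Context note (not part of the patch): patch 0003 above replaces the glink linked list with a shared reference counter (pipeline_refs), so that every client structure on one pipelined TCP connection shares a single count and only the last one to detach releases the TCP quota slot. A simplified analogue of that idea — invented names, C11 atomics instead of isc_refcount_t, and no quota handling — could be sketched as:

/*
 * Shared-counter pattern used for pipeline groups: the first client
 * on a TCP connection allocates the counter, later pipelined clients
 * share it, and the last release frees it (which is the moment the
 * real code would drop the tcp-clients quota slot).
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct pipeline {
	atomic_int refs;
};

/* first client on the connection creates the counter */
static struct pipeline *pipeline_new(void) {
	struct pipeline *p = malloc(sizeof(*p));
	assert(p != NULL);
	atomic_init(&p->refs, 1);
	return p;
}

/* each additional pipelined client shares the same counter */
static struct pipeline *pipeline_share(struct pipeline *p) {
	int old = atomic_fetch_add(&p->refs, 1);
	assert(old > 0);
	return p;
}

/*
 * Returns true when the caller was the last user, so the shared
 * resource (the TCP quota slot, not modelled here) could be released.
 */
static bool pipeline_release(struct pipeline *p) {
	int old = atomic_fetch_sub(&p->refs, 1);
	assert(old > 0);
	if (old == 1) {
		free(p);
		return true;
	}
	return false;
}

int main(void) {
	struct pipeline *p = pipeline_new();
	struct pipeline *q = pipeline_share(p);   /* second client        */
	assert(!pipeline_release(q));             /* still one user left  */
	assert(pipeline_release(p));              /* last one frees it    */
	return 0;
}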
diff --git a/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
new file mode 100644
index 0000000000..034ab13303
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
@@ -0,0 +1,512 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:12:18 -0700
+Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks
+
+- ensure that tcpactive is cleaned up correctly when accept() fails.
+- set 'client->tcpattached' when the client is attached to the tcpquota.
+ carry this value on to new clients sharing the same pipeline group.
+ don't call isc_quota_detach() on the tcpquota unless tcpattached is
+ set. this way clients that were allowed to accept TCP connections
+ despite being over quota (and therefore, were never attached to the
+ quota) will not inadvertently detach from it and mess up the
+ accounting.
+- simplify the code for tcpquota disconnection by using a new function
+ tcpquota_disconnect().
+- before deciding whether to reject a new connection due to quota
+ exhaustion, check to see whether there are at least two active
+ clients. previously, this was "at least one", but that could be
+ insufficient if there was one other client in READING state (waiting
+ for messages on an open connection) but none in READY (listening
+ for new connections).
+- before deciding whether a TCP client object can go inactive, we
+ must ensure there are enough other clients to maintain service
+ afterward -- both accepting new connections and reading/processing new
+ queries. A TCP client can't shut down unless at least one
+ client is accepting new connections and (in the case of pipelined
+ clients) at least one additional client is waiting to read.
+
+(cherry picked from commit c7394738b2445c16f728a88394864dd61baad900)
+(cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856)
+(cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3)
+---
+ bin/named/client.c | 244 +++++++++++++++++++------------
+ bin/named/include/named/client.h | 3 +-
+ 2 files changed, 152 insertions(+), 95 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 277656cef0..61e96dd28c 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event);
+ static void client_request(isc_task_t *task, isc_event_t *event);
+ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, bool tcp);
++ dns_dispatch_t *disp, ns_client_t *oldclient,
++ bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+ allowed(isc_netaddr_t *addr, dns_name_t *signer,
+ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
+- uint8_t *ecs_scope, dns_acl_t *acl)
++ uint8_t *ecs_scope, dns_acl_t *acl);
+ static void compute_cookie(ns_client_t *client, uint32_t when,
+ uint32_t nonce, const unsigned char *secret,
+ isc_buffer_t *buf);
+@@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) {
+ * contention here is expected to be negligible, given that this code
+ * is only executed for TCP connections.
+ */
+- refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
++ refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
+ isc_refcount_init(refs, 1);
+ client->pipeline_refs = refs;
+ }
+@@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) {
+ */
+ static void
+ pipeline_attach(ns_client_t *source, ns_client_t *target) {
+- int old_refs;
++ int refs;
+
+ REQUIRE(source->pipeline_refs != NULL);
+ REQUIRE(target->pipeline_refs == NULL);
+
+- old_refs = isc_refcount_increment(source->pipeline_refs);
+- INSIST(old_refs > 0);
++ isc_refcount_increment(source->pipeline_refs, &refs);
++ INSIST(refs > 1);
+ target->pipeline_refs = source->pipeline_refs;
+ }
+
+@@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) {
+ */
+ static bool
+ pipeline_detach(ns_client_t *client) {
+- isc_refcount_t *refs;
+- int old_refs;
++ isc_refcount_t *refcount;
++ int refs;
+
+ REQUIRE(client->pipeline_refs != NULL);
+
+- refs = client->pipeline_refs;
++ refcount = client->pipeline_refs;
+ client->pipeline_refs = NULL;
+
+- old_refs = isc_refcount_decrement(refs);
+- INSIST(old_refs > 0);
++ isc_refcount_decrement(refcount, refs);
+
+- if (old_refs == 1) {
+- isc_mem_free(client->sctx->mctx, refs);
++ if (refs == 0) {
++ isc_mem_free(ns_g_mctx, refs);
+ return (true);
+ }
+
+ return (false);
+ }
+
++/*
++ * Detach a client from the TCP client quota if appropriate, and set
++ * the quota pointer to NULL.
++ *
++ * Sometimes when the TCP client quota is exhausted but there are no other
++ * clients servicing the interface, a client will be allowed to continue
++ * running despite not having been attached to the quota. In this event,
++ * the TCP quota was never attached to the client, so when the client (or
++ * associated pipeline group) shuts down, the quota must NOT be detached.
++ *
++ * Otherwise, if the quota pointer is set, it should be detached. If not
++ * set at all, we just return without doing anything.
++ */
++static void
++tcpquota_disconnect(ns_client_t *client) {
++ if (client->tcpquota == NULL) {
++ return;
++ }
++
++ if (client->tcpattached) {
++ isc_quota_detach(&client->tcpquota);
++ client->tcpattached = false;
++ } else {
++ client->tcpquota = NULL;
++ }
++}
++
+ /*%
+ * Check for a deactivation or shutdown request and take appropriate
+ * action. Returns true if either is in progress; in this case
+@@ -490,38 +517,31 @@ exit_check(ns_client_t *client) {
+ client->tcpmsg_valid = false;
+ }
+
+- if (client->tcpquota != NULL) {
+- if (client->pipeline_refs == NULL ||
+- pipeline_detach(client))
+- {
+- /*
+- * Only detach from the TCP client quota if
+- * there are no more client structures using
+- * this TCP connection.
+- *
+- * Note that we check 'pipeline_refs' and not
+- * 'pipelined' because in some cases (e.g.
+- * after receiving a request with an opcode
+- * different than QUERY) 'pipelined' is set to
+- * false after the reference counter gets
+- * allocated in pipeline_init() and we must
+- * still drop our reference as failing to do so
+- * would prevent the reference counter itself
+- * from being freed.
+- */
+- isc_quota_detach(&client->tcpquota);
+- } else {
+- /*
+- * There are other client structures using this
+- * TCP connection, so we cannot detach from the
+- * TCP client quota to prevent excess TCP
+- * connections from being accepted. However,
+- * this client structure might later be reused
+- * for accepting new connections and thus must
+- * have its 'tcpquota' field set to NULL.
+- */
+- client->tcpquota = NULL;
+- }
++ /*
++ * Detach from pipeline group and from TCP client quota,
++ * if appropriate.
++ *
++ * - If no pipeline group is active, attempt to
++ * detach from the TCP client quota.
++ *
++ * - If a pipeline group is active, detach from it;
++ * if the return code indicates that there are no more
++ * clients left in this pipeline group, we also detach
++ * from the TCP client quota.
++ *
++ * - Otherwise we don't try to detach, we just set the
++ * TCP quota pointer to NULL if it wasn't NULL already.
++ *
++ * tcpquota_disconnect() will set tcpquota to NULL, either
++ * by detaching it or by assignment, depending on the
++ * needs of the client. See the comments on that function
++ * for further information.
++ */
++ if (client->pipeline_refs == NULL || pipeline_detach(client)) {
++ tcpquota_disconnect(client);
++ } else {
++ client->tcpquota = NULL;
++ client->tcpattached = false;
+ }
+
+ if (client->tcpsocket != NULL) {
+@@ -544,8 +564,6 @@ exit_check(ns_client_t *client) {
+ client->timerset = false;
+ }
+
+- client->pipelined = false;
+-
+ client->peeraddr_valid = false;
+
+ client->state = NS_CLIENTSTATE_READY;
+@@ -558,18 +576,27 @@ exit_check(ns_client_t *client) {
+ * active and force it to go inactive if not.
+ *
+ * UDP clients go inactive at this point, but a TCP client
+- * will needs to remain active if no other clients are
+- * listening for TCP requests on this interface, to
+- * prevent this interface from going nonresponsive.
++ * may need to remain active and go into ready state if
++ * no other clients are available to listen for TCP
++ * requests on this interface or (in the case of pipelined
++ * clients) to read for additional messages on the current
++ * connection.
+ */
+ if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+ LOCK(&client->interface->lock);
+- if (client->interface->ntcpaccepting == 0) {
++ if ((client->interface->ntcpaccepting == 0 ||
++ (client->pipelined &&
++ client->interface->ntcpactive < 2)) &&
++ client->newstate != NS_CLIENTSTATE_FREED)
++ {
+ client->mortal = false;
++ client->newstate = NS_CLIENTSTATE_READY;
+ }
+ UNLOCK(&client->interface->lock);
+ }
+
++ client->pipelined = false;
++
+ /*
+ * We don't need the client; send it to the inactive
+ * queue for recycling.
+@@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ client->pipelined = false;
+ }
+ if (TCP_CLIENT(client) && client->pipelined) {
++ /*
++ * We're pipelining. Replace the client; the
++ * the replacement can read the TCP socket looking
++ * for new messages and this client can process the
++ * current message asynchronously.
++ *
++ * There are now at least three clients using this
++ * TCP socket - one accepting new connections,
++ * one reading an existing connection to get new
++ * messages, and one answering the message already
++ * received.
++ */
+ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+ client->pipelined = false;
+@@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->pipelined = false;
+ client->pipeline_refs = NULL;
+ client->tcpquota = NULL;
++ client->tcpattached = false;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+ client->peeraddr_valid = false;
+@@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
+- if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
+- }
++ tcpquota_disconnect(client);
+ }
+
+ if (exit_check(client))
+@@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ client->pipelined = false;
+ result = ns_client_replace(client);
+ if (result == ISC_R_SUCCESS &&
+- (client->sctx->keepresporder == NULL ||
++ (ns_g_server->keepresporder == NULL ||
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
+@@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) {
+ * in named.conf. If we can't attach to it here, that means the TCP
+ * client quota has been exceeded.
+ */
+- result = isc_quota_attach(&client->sctx->tcpquota,
++ result = isc_quota_attach(&ns_g_server->tcpquota,
+ &client->tcpquota);
+ if (result != ISC_R_SUCCESS) {
+ bool exit;
+@@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) {
+ * interface to be starved, with no clients able
+ * to accept new connections.
+ *
+- * So, we check here to see if any other client
+- * is already servicing TCP queries on this
++ * So, we check here to see if any other clients
++ * are already servicing TCP queries on this
+ * interface (whether accepting, reading, or
+- * processing).
+- *
+- * If so, then it's okay *not* to call
+- * accept - we can let this client to go inactive
+- * and the other one handle the next connection
+- * when it's ready.
++ * processing). If there are at least two
++ * (one reading and one processing a request)
++ * then it's okay *not* to call accept - we
++ * can let this client go inactive and another
++ * one will resume accepting when it's done.
+ *
+- * But if not, then we need to be a little bit
+- * flexible about the quota. We allow *one* extra
+- * TCP client through, to ensure we're listening on
+- * every interface.
++ * If there aren't enough active clients on the
++ * interface, then we can be a little bit
++ * flexible about the quota. We'll allow *one*
++ * extra client through to ensure we're listening
++ * on every interface.
+ *
+- * (Note: In practice this means that the *real*
+- * TCP client quota is tcp-clients plus the number
+- * of interfaces.)
++ * (Note: In practice this means that the real
++ * TCP client quota is tcp-clients plus the
++ * number of listening interfaces plus 2.)
+ */
+ LOCK(&client->interface->lock);
+- exit = (client->interface->ntcpactive > 0);
++ exit = (client->interface->ntcpactive > 1);
+ UNLOCK(&client->interface->lock);
+
+ if (exit) {
+@@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) {
+ (void)exit_check(client);
+ return;
+ }
++
++ } else {
++ client->tcpattached = true;
+ }
+
+ /*
+@@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) {
+ UNEXPECTED_ERROR(__FILE__, __LINE__,
+ "isc_socket_accept() failed: %s",
+ isc_result_totext(result));
+- if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
++
++ tcpquota_disconnect(client);
++
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
+ }
++
+ return;
+ }
+
+@@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) {
+ * once the connection is established.
+ *
+ * When the client object is shutting down after handling a TCP
+- * request (see exit_check()), it looks to see whether this value is
+- * non-zero. If so, that means another client has already called
+- * accept() and is waiting to establish the next connection, which
+- * means the first client is free to go inactive. Otherwise,
+- * the first client must come back and call accept() again; this
+- * guarantees there will always be at least one client listening
+- * for new TCP connections on each interface.
++ * request (see exit_check()), if this value is at least one, that
++ * means another client has called accept() and is waiting to
++ * establish the next connection. That means the client may be
++ * free to become inactive; otherwise it may need to start
++ * listening for connections itself to prevent the interface
++ * going dead.
+ */
+ LOCK(&client->interface->lock);
+ client->interface->ntcpaccepting++;
+@@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) {
+ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+- client->dispatch, tcp);
++ client->dispatch, client, tcp);
++
++ /*
++ * The responsibility for listening for new requests is hereby
++ * transferred to the new client. Therefore, the old client
++ * should refrain from listening for any more requests.
++ */
++ client->mortal = true;
+ }
+ if (result != ISC_R_SUCCESS) {
+ return (result);
+ }
+
+- /*
+- * The responsibility for listening for new requests is hereby
+- * transferred to the new client. Therefore, the old client
+- * should refrain from listening for any more requests.
+- */
+- client->mortal = true;
+-
+ return (ISC_R_SUCCESS);
+ }
+
+@@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
+
+ static isc_result_t
+ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, bool tcp)
++ dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->dscp = ifp->dscp;
+
+ if (tcp) {
++ client->tcpattached = false;
++ if (oldclient != NULL) {
++ client->tcpattached = oldclient->tcpattached;
++ }
++
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = true;
++
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+ &client->tcplistener);
+@@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &client->sctx->tcpquota;
++ client->tcpquota = &ns_g_server->tcpquota;
++ client->tcpattached = oldclient->tcpattached;
+
+ client->dscp = ifp->dscp;
+
+@@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ LOCK(&client->interface->lock);
+ client->interface->ntcpactive++;
+ UNLOCK(&client->interface->lock);
+-
+ client->tcpactive = true;
+
+ INSIST(client->tcpmsg_valid == false);
+@@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
+ MTRACE("createclients");
+
+ for (disp = 0; disp < n; disp++) {
+- result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
++ result = get_client(manager, ifp, ifp->udpdispatch[disp],
++ NULL, tcp);
+ if (result != ISC_R_SUCCESS)
+ break;
+ }
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index aeed9ccdda..e2c40acd28 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -9,8 +9,6 @@
+ * information regarding copyright ownership.
+ */
+
+-/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */
+-
+ #ifndef NAMED_CLIENT_H
+ #define NAMED_CLIENT_H 1
+
+@@ -136,6 +134,7 @@ struct ns_client {
+ bool pipelined; /*%< TCP queries not in sequence */
+ isc_refcount_t *pipeline_refs;
+ isc_quota_t *tcpquota;
++ bool tcpattached;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+
+--
+2.20.1
+
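Context note (not part of the patch): patch 0004 above introduces the 'tcpattached' flag and tightens the over-quota escape hatch — a client that cannot attach to the tcp-clients quota may only carry on, unattached, if the interface would otherwise be left short of TCP service (fewer than two active clients). The decision logic, reduced to a hedged sketch with invented names and no locking, is roughly:

/*
 * Illustrative accept-time decision: attach to the quota when there
 * is room; when over quota, let one extra client through only if
 * the interface has too few active TCP clients to keep serving.
 */
#include <stdbool.h>
#include <stdio.h>

struct tcp_state {
	int quota_used;
	int quota_max;       /* the tcp-clients limit              */
	int ntcpactive;      /* clients serving TCP on this iface  */
};

/*
 * Returns true if the client may continue with accept().
 * *attached tells the caller whether it actually holds a quota slot
 * and therefore must release one later (the role of 'tcpattached').
 */
static bool tcp_accept_allowed(struct tcp_state *st, bool *attached) {
	if (st->quota_used < st->quota_max) {
		st->quota_used++;
		*attached = true;
		return true;
	}
	/*
	 * Over quota: allow one extra client through only if fewer
	 * than two clients are already servicing this interface
	 * (one reading plus one processing is considered enough).
	 */
	*attached = false;
	return (st->ntcpactive <= 1);
}

int main(void) {
	struct tcp_state st = { .quota_used = 10, .quota_max = 10,
	                        .ntcpactive = 1 };
	bool attached = false;
	bool allowed = tcp_accept_allowed(&st, &attached);
	printf("allowed=%d attached=%d\n", allowed, attached);
	return 0;
}

Tracking whether the quota was actually attached is what keeps the accounting straight: an over-quota client that was let through must not later detach a slot it never held.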
diff --git a/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
new file mode 100644
index 0000000000..987e75bc0e
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
@@ -0,0 +1,911 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/c47ccf6]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From c47ccf630f147378568b33e8fdb7b754f228c346 Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:26:05 -0700
+Subject: [PATCH 5/6] refactor tcpquota and pipeline refs; allow special-case
+ overrun in isc_quota
+
+- if the TCP quota has been exceeded but there are no clients listening
+ for new connections on the interface, we can now force attachment to the
+ quota using isc_quota_force(), instead of carrying on with the quota not
+ attached.
+- the TCP client quota is now referenced via a reference-counted
+ 'ns_tcpconn' object, one of which is created whenever a client begins
+ listening for new connections, and attached to by members of that
+ client's pipeline group. when the last reference to the tcpconn
+ object is detached, it is freed and the TCP quota slot is released.
+- reduce code duplication by adding mark_tcp_active() function.
+- convert counters to atomic.
+
+(cherry picked from commit 7e8222378ca24f1302a0c1c638565050ab04681b)
+(cherry picked from commit 4939451275722bfda490ea86ca13e84f6bc71e46)
+(cherry picked from commit 13f7c918b8720d890408f678bd73c20e634539d9)
+---
+ bin/named/client.c | 444 +++++++++++--------------
+ bin/named/include/named/client.h | 12 +-
+ bin/named/include/named/interfacemgr.h | 6 +-
+ bin/named/interfacemgr.c | 1 +
+ lib/isc/include/isc/quota.h | 7 +
+ lib/isc/quota.c | 33 +-
+ lib/isc/win32/libisc.def.in | 1 +
+ 7 files changed, 236 insertions(+), 268 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 61e96dd28c..d826ab32bf 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -244,8 +244,7 @@ static void client_start(isc_task_t *task, isc_event_t *event);
+ static void client_request(isc_task_t *task, isc_event_t *event);
+ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, ns_client_t *oldclient,
+- bool tcp);
++ dns_dispatch_t *disp, bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+@@ -301,16 +300,32 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
+ }
+
+ /*%
+- * Allocate a reference counter that will track the number of client structures
+- * using the TCP connection that 'client' called accept() for. This counter
+- * will be shared between all client structures associated with this TCP
+- * connection.
++ * Allocate a reference-counted object that will maintain a single pointer to
++ * the (also reference-counted) TCP client quota, shared between all the
++ * clients processing queries on a single TCP connection, so that all
++ * clients sharing the one socket will together consume only one slot in
++ * the 'tcp-clients' quota.
+ */
+-static void
+-pipeline_init(ns_client_t *client) {
+- isc_refcount_t *refs;
++static isc_result_t
++tcpconn_init(ns_client_t *client, bool force) {
++ isc_result_t result;
++ isc_quota_t *quota = NULL;
++ ns_tcpconn_t *tconn = NULL;
+
+- REQUIRE(client->pipeline_refs == NULL);
++ REQUIRE(client->tcpconn == NULL);
++
++ /*
++ * Try to attach to the quota first, so we won't pointlessly
++ * allocate memory for a tcpconn object if we can't get one.
++ */
++ if (force) {
++ result = isc_quota_force(&ns_g_server->tcpquota, &quota);
++ } else {
++ result = isc_quota_attach(&ns_g_server->tcpquota, &quota);
++ }
++ if (result != ISC_R_SUCCESS) {
++ return (result);
++ }
+
+ /*
+ * A global memory context is used for the allocation as different
+@@ -320,78 +335,80 @@ pipeline_init(ns_client_t *client) {
+ * contention here is expected to be negligible, given that this code
+ * is only executed for TCP connections.
+ */
+- refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
+- isc_refcount_init(refs, 1);
+- client->pipeline_refs = refs;
++ tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));
++
++ isc_refcount_init(&tconn->refs, 1);
++ tconn->tcpquota = quota;
++ quota = NULL;
++ tconn->pipelined = false;
++
++ client->tcpconn = tconn;
++
++ return (ISC_R_SUCCESS);
+ }
+
+ /*%
+- * Increase the count of client structures using the TCP connection that
+- * 'source' is associated with and put a pointer to that count in 'target',
+- * thus associating it with the same TCP connection.
++ * Increase the count of client structures sharing the TCP connection
++ * that 'source' is associated with; add a pointer to the same tcpconn
++ * to 'target', thus associating it with the same TCP connection.
+ */
+ static void
+-pipeline_attach(ns_client_t *source, ns_client_t *target) {
++tcpconn_attach(ns_client_t *source, ns_client_t *target) {
+ int refs;
+
+- REQUIRE(source->pipeline_refs != NULL);
+- REQUIRE(target->pipeline_refs == NULL);
++ REQUIRE(source->tcpconn != NULL);
++ REQUIRE(target->tcpconn == NULL);
++ REQUIRE(source->tcpconn->pipelined);
+
+- isc_refcount_increment(source->pipeline_refs, &refs);
++ isc_refcount_increment(&source->tcpconn->refs, &refs);
+ INSIST(refs > 1);
+- target->pipeline_refs = source->pipeline_refs;
++ target->tcpconn = source->tcpconn;
+ }
+
+ /*%
+- * Decrease the count of client structures using the TCP connection that
++ * Decrease the count of client structures sharing the TCP connection that
+ * 'client' is associated with. If this is the last client using this TCP
+- * connection, free the reference counter and return true; otherwise, return
+- * false.
++ * connection, we detach from the TCP quota and free the tcpconn
++ * object. Either way, client->tcpconn is set to NULL.
+ */
+-static bool
+-pipeline_detach(ns_client_t *client) {
+- isc_refcount_t *refcount;
++static void
++tcpconn_detach(ns_client_t *client) {
++ ns_tcpconn_t *tconn = NULL;
+ int refs;
+
+- REQUIRE(client->pipeline_refs != NULL);
+-
+- refcount = client->pipeline_refs;
+- client->pipeline_refs = NULL;
++ REQUIRE(client->tcpconn != NULL);
+
+- isc_refcount_decrement(refcount, refs);
++ tconn = client->tcpconn;
++ client->tcpconn = NULL;
+
++ isc_refcount_decrement(&tconn->refs, &refs);
+ if (refs == 0) {
+- isc_mem_free(ns_g_mctx, refs);
+- return (true);
++ isc_quota_detach(&tconn->tcpquota);
++ isc_mem_free(ns_g_mctx, tconn);
+ }
+-
+- return (false);
+ }
+
+-/*
+- * Detach a client from the TCP client quota if appropriate, and set
+- * the quota pointer to NULL.
+- *
+- * Sometimes when the TCP client quota is exhausted but there are no other
+- * clients servicing the interface, a client will be allowed to continue
+- * running despite not having been attached to the quota. In this event,
+- * the TCP quota was never attached to the client, so when the client (or
+- * associated pipeline group) shuts down, the quota must NOT be detached.
++/*%
++ * Mark a client as active and increment the interface's 'ntcpactive'
++ * counter, as a signal that there is at least one client servicing
++ * TCP queries for the interface. If we reach the TCP client quota at
++ * some point, this will be used to determine whether a quota overrun
++ * should be permitted.
+ *
+- * Otherwise, if the quota pointer is set, it should be detached. If not
+- * set at all, we just return without doing anything.
++ * Marking the client active with the 'tcpactive' flag ensures proper
++ * accounting, by preventing us from incrementing or decrementing
++ * 'ntcpactive' more than once per client.
+ */
+ static void
+-tcpquota_disconnect(ns_client_t *client) {
+- if (client->tcpquota == NULL) {
+- return;
+- }
+-
+- if (client->tcpattached) {
+- isc_quota_detach(&client->tcpquota);
+- client->tcpattached = false;
+- } else {
+- client->tcpquota = NULL;
++mark_tcp_active(ns_client_t *client, bool active) {
++ if (active && !client->tcpactive) {
++ isc_atomic_xadd(&client->interface->ntcpactive, 1);
++ client->tcpactive = active;
++ } else if (!active && client->tcpactive) {
++ uint32_t old =
++ isc_atomic_xadd(&client->interface->ntcpactive, -1);
++ INSIST(old > 0);
++ client->tcpactive = active;
+ }
+ }
+
+@@ -484,7 +501,8 @@ exit_check(ns_client_t *client) {
+ INSIST(client->recursionquota == NULL);
+
+ if (NS_CLIENTSTATE_READING == client->newstate) {
+- if (!client->pipelined) {
++ INSIST(client->tcpconn != NULL);
++ if (!client->tcpconn->pipelined) {
+ client_read(client);
+ client->newstate = NS_CLIENTSTATE_MAX;
+ return (true); /* We're done. */
+@@ -507,8 +525,8 @@ exit_check(ns_client_t *client) {
+ dns_tcpmsg_cancelread(&client->tcpmsg);
+ }
+
+- if (client->nreads != 0) {
+- /* Still waiting for read cancel completion. */
++ /* Still waiting for read cancel completion. */
++ if (client->nreads > 0) {
+ return (true);
+ }
+
+@@ -518,43 +536,45 @@ exit_check(ns_client_t *client) {
+ }
+
+ /*
+- * Detach from pipeline group and from TCP client quota,
+- * if appropriate.
++ * Soon the client will be ready to accept a new TCP
++ * connection or UDP request, but we may have enough
++ * clients doing that already. Check whether this client
++ * needs to remain active and allow it to go inactive if
++ * not.
+ *
+- * - If no pipeline group is active, attempt to
+- * detach from the TCP client quota.
++ * UDP clients always go inactive at this point, but a TCP
++ * client may need to stay active and return to READY
++ * state if no other clients are available to listen
++ * for TCP requests on this interface.
+ *
+- * - If a pipeline group is active, detach from it;
+- * if the return code indicates that there are no more
+- * clients left in this pipeline group, we also detach
+- * from the TCP client quota.
+- *
+- * - Otherwise we don't try to detach, we just set the
+- * TCP quota pointer to NULL if it wasn't NULL already.
+- *
+- * tcpquota_disconnect() will set tcpquota to NULL, either
+- * by detaching it or by assignment, depending on the
+- * needs of the client. See the comments on that function
+- * for further information.
++ * Regardless, if we're going to FREED state, that means
++ * the system is shutting down and we don't need to
++ * retain clients.
+ */
+- if (client->pipeline_refs == NULL || pipeline_detach(client)) {
+- tcpquota_disconnect(client);
+- } else {
+- client->tcpquota = NULL;
+- client->tcpattached = false;
++ if (client->mortal && TCP_CLIENT(client) &&
++ client->newstate != NS_CLIENTSTATE_FREED &&
++ !ns_g_clienttest &&
++ isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
++ {
++ /* Nobody else is accepting */
++ client->mortal = false;
++ client->newstate = NS_CLIENTSTATE_READY;
++ }
++
++ /*
++ * Detach from TCP connection and TCP client quota,
++ * if appropriate. If this is the last reference to
++ * the TCP connection in our pipeline group, the
++ * TCP quota slot will be released.
++ */
++ if (client->tcpconn) {
++ tcpconn_detach(client);
+ }
+
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpactive > 0);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
++ mark_tcp_active(client, false);
+ }
+
+ if (client->timerset) {
+@@ -567,35 +587,6 @@ exit_check(ns_client_t *client) {
+ client->peeraddr_valid = false;
+
+ client->state = NS_CLIENTSTATE_READY;
+- INSIST(client->recursionquota == NULL);
+-
+- /*
+- * Now the client is ready to accept a new TCP connection
+- * or UDP request, but we may have enough clients doing
+- * that already. Check whether this client needs to remain
+- * active and force it to go inactive if not.
+- *
+- * UDP clients go inactive at this point, but a TCP client
+- * may need to remain active and go into ready state if
+- * no other clients are available to listen for TCP
+- * requests on this interface or (in the case of pipelined
+- * clients) to read for additional messages on the current
+- * connection.
+- */
+- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+- LOCK(&client->interface->lock);
+- if ((client->interface->ntcpaccepting == 0 ||
+- (client->pipelined &&
+- client->interface->ntcpactive < 2)) &&
+- client->newstate != NS_CLIENTSTATE_FREED)
+- {
+- client->mortal = false;
+- client->newstate = NS_CLIENTSTATE_READY;
+- }
+- UNLOCK(&client->interface->lock);
+- }
+-
+- client->pipelined = false;
+
+ /*
+ * We don't need the client; send it to the inactive
+@@ -630,7 +621,7 @@ exit_check(ns_client_t *client) {
+ }
+
+ /* Still waiting for accept cancel completion. */
+- if (! (client->naccepts == 0)) {
++ if (client->naccepts > 0) {
+ return (true);
+ }
+
+@@ -641,7 +632,7 @@ exit_check(ns_client_t *client) {
+ }
+
+ /* Still waiting for recv cancel completion. */
+- if (! (client->nrecvs == 0)) {
++ if (client->nrecvs > 0) {
+ return (true);
+ }
+
+@@ -654,14 +645,7 @@ exit_check(ns_client_t *client) {
+ INSIST(client->recursionquota == NULL);
+ if (client->tcplistener != NULL) {
+ isc_socket_detach(&client->tcplistener);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpactive > 0);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
++ mark_tcp_active(client, false);
+ }
+ if (client->udpsocket != NULL) {
+ isc_socket_detach(&client->udpsocket);
+@@ -816,7 +800,7 @@ client_start(isc_task_t *task, isc_event_t *event) {
+ return;
+
+ if (TCP_CLIENT(client)) {
+- if (client->pipelined) {
++ if (client->tcpconn != NULL) {
+ client_read(client);
+ } else {
+ client_accept(client);
+@@ -2470,6 +2454,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ client->nrecvs--;
+ } else {
+ INSIST(TCP_CLIENT(client));
++ INSIST(client->tcpconn != NULL);
+ REQUIRE(event->ev_type == DNS_EVENT_TCPMSG);
+ REQUIRE(event->ev_sender == &client->tcpmsg);
+ buffer = &client->tcpmsg.buffer;
+@@ -2657,17 +2642,19 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ /*
+ * Pipeline TCP query processing.
+ */
+- if (client->message->opcode != dns_opcode_query) {
+- client->pipelined = false;
++ if (TCP_CLIENT(client) &&
++ client->message->opcode != dns_opcode_query)
++ {
++ client->tcpconn->pipelined = false;
+ }
+- if (TCP_CLIENT(client) && client->pipelined) {
++ if (TCP_CLIENT(client) && client->tcpconn->pipelined) {
+ /*
+ * We're pipelining. Replace the client; the
+- * the replacement can read the TCP socket looking
+- * for new messages and this client can process the
++ * replacement can read the TCP socket looking
++ * for new messages and this one can process the
+ * current message asynchronously.
+ *
+- * There are now at least three clients using this
++ * There will now be at least three clients using this
+ * TCP socket - one accepting new connections,
+ * one reading an existing connection to get new
+ * messages, and one answering the message already
+@@ -2675,7 +2662,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ */
+ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+- client->pipelined = false;
++ client->tcpconn->pipelined = false;
+ }
+ }
+
+@@ -3233,10 +3220,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->signer = NULL;
+ dns_name_init(&client->signername, NULL);
+ client->mortal = false;
+- client->pipelined = false;
+- client->pipeline_refs = NULL;
+- client->tcpquota = NULL;
+- client->tcpattached = false;
++ client->tcpconn = NULL;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+ client->peeraddr_valid = false;
+@@ -3341,9 +3325,10 @@ client_read(ns_client_t *client) {
+
+ static void
+ client_newconn(isc_task_t *task, isc_event_t *event) {
++ isc_result_t result;
+ ns_client_t *client = event->ev_arg;
+ isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
+- isc_result_t result;
++ uint32_t old;
+
+ REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
+ REQUIRE(NS_CLIENT_VALID(client));
+@@ -3363,10 +3348,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpaccepting > 0);
+- client->interface->ntcpaccepting--;
+- UNLOCK(&client->interface->lock);
++ old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
++ INSIST(old > 0);
+
+ /*
+ * We must take ownership of the new socket before the exit
+@@ -3399,7 +3382,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
+- tcpquota_disconnect(client);
++ tcpconn_detach(client);
+ }
+
+ if (exit_check(client))
+@@ -3437,15 +3420,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ * telnetting to port 53 (once per CPU) will
+ * deny service to legitimate TCP clients.
+ */
+- client->pipelined = false;
+ result = ns_client_replace(client);
+ if (result == ISC_R_SUCCESS &&
+ (ns_g_server->keepresporder == NULL ||
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
+- pipeline_init(client);
+- client->pipelined = true;
++ client->tcpconn->pipelined = true;
+ }
+
+ client_read(client);
+@@ -3462,78 +3443,59 @@ client_accept(ns_client_t *client) {
+ CTRACE("accept");
+
+ /*
+- * The tcpquota object can only be simultaneously referenced a
+- * pre-defined number of times; this is configured by 'tcp-clients'
+- * in named.conf. If we can't attach to it here, that means the TCP
+- * client quota has been exceeded.
++ * Set up a new TCP connection. This means try to attach to the
++ * TCP client quota (tcp-clients), but fail if we're over quota.
+ */
+- result = isc_quota_attach(&ns_g_server->tcpquota,
+- &client->tcpquota);
++ result = tcpconn_init(client, false);
+ if (result != ISC_R_SUCCESS) {
+- bool exit;
++ bool exit;
+
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
+- "no more TCP clients: %s",
+- isc_result_totext(result));
+-
+- /*
+- * We have exceeded the system-wide TCP client
+- * quota. But, we can't just block this accept
+- * in all cases, because if we did, a heavy TCP
+- * load on other interfaces might cause this
+- * interface to be starved, with no clients able
+- * to accept new connections.
+- *
+- * So, we check here to see if any other clients
+- * are already servicing TCP queries on this
+- * interface (whether accepting, reading, or
+- * processing). If there are at least two
+- * (one reading and one processing a request)
+- * then it's okay *not* to call accept - we
+- * can let this client go inactive and another
+- * one will resume accepting when it's done.
+- *
+- * If there aren't enough active clients on the
+- * interface, then we can be a little bit
+- * flexible about the quota. We'll allow *one*
+- * extra client through to ensure we're listening
+- * on every interface.
+- *
+- * (Note: In practice this means that the real
+- * TCP client quota is tcp-clients plus the
+- * number of listening interfaces plus 2.)
+- */
+- LOCK(&client->interface->lock);
+- exit = (client->interface->ntcpactive > 1);
+- UNLOCK(&client->interface->lock);
++ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
++ NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
++ "TCP client quota reached: %s",
++ isc_result_totext(result));
+
+- if (exit) {
+- client->newstate = NS_CLIENTSTATE_INACTIVE;
+- (void)exit_check(client);
+- return;
+- }
++ /*
++ * We have exceeded the system-wide TCP client quota. But,
++ * we can't just block this accept in all cases, because if
++ * we did, a heavy TCP load on other interfaces might cause
++ * this interface to be starved, with no clients able to
++ * accept new connections.
++ *
++ * So, we check here to see if any other clients are
++ * already servicing TCP queries on this interface (whether
++ * accepting, reading, or processing). If we find at least
++ * one, then it's okay *not* to call accept - we can let this
++ * client go inactive and another will take over when it's
++ * done.
++ *
++ * If there aren't enough active clients on the interface,
++ * then we can be a little bit flexible about the quota.
++ * We'll allow *one* extra client through to ensure we're
++ * listening on every interface; we do this by setting the
++ * 'force' option to tcpconn_init().
++ *
++ * (Note: In practice this means that the real TCP client
++ * quota is tcp-clients plus the number of listening
++ * interfaces plus 1.)
++ */
++ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
++ if (exit) {
++ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ (void)exit_check(client);
++ return;
++ }
+
+- } else {
+- client->tcpattached = true;
++ result = tcpconn_init(client, true);
++ RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ }
+
+ /*
+- * By incrementing the interface's ntcpactive counter we signal
+- * that there is at least one client servicing TCP queries for the
+- * interface.
+- *
+- * We also make note of the fact in the client itself with the
+- * tcpactive flag. This ensures proper accounting by preventing
+- * us from accidentally incrementing or decrementing ntcpactive
+- * more than once per client object.
++ * If this client was set up using get_client() or get_worker(),
++ * then TCP is already marked active. However, if it was restarted
++ * from exit_check(), it might not be, so we take care of it now.
+ */
+- if (!client->tcpactive) {
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
+- }
++ mark_tcp_active(client, true);
+
+ result = isc_socket_accept(client->tcplistener, client->task,
+ client_newconn, client);
+@@ -3549,15 +3511,8 @@ client_accept(ns_client_t *client) {
+ "isc_socket_accept() failed: %s",
+ isc_result_totext(result));
+
+- tcpquota_disconnect(client);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
+-
++ tcpconn_detach(client);
++ mark_tcp_active(client, false);
+ return;
+ }
+
+@@ -3582,9 +3537,7 @@ client_accept(ns_client_t *client) {
+ * listening for connections itself to prevent the interface
+ * going dead.
+ */
+- LOCK(&client->interface->lock);
+- client->interface->ntcpaccepting++;
+- UNLOCK(&client->interface->lock);
++ isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
+ }
+
+ static void
+@@ -3655,24 +3608,25 @@ ns_client_replace(ns_client_t *client) {
+ REQUIRE(client->manager != NULL);
+
+ tcp = TCP_CLIENT(client);
+- if (tcp && client->pipelined) {
++ if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) {
+ result = get_worker(client->manager, client->interface,
+ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+- client->dispatch, client, tcp);
++ client->dispatch, tcp);
+
+- /*
+- * The responsibility for listening for new requests is hereby
+- * transferred to the new client. Therefore, the old client
+- * should refrain from listening for any more requests.
+- */
+- client->mortal = true;
+ }
+ if (result != ISC_R_SUCCESS) {
+ return (result);
+ }
+
++ /*
++ * The responsibility for listening for new requests is hereby
++ * transferred to the new client. Therefore, the old client
++ * should refrain from listening for any more requests.
++ */
++ client->mortal = true;
++
+ return (ISC_R_SUCCESS);
+ }
+
+@@ -3806,7 +3760,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
+
+ static isc_result_t
+ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
++ dns_dispatch_t *disp, bool tcp)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3850,15 +3804,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->dscp = ifp->dscp;
+
+ if (tcp) {
+- client->tcpattached = false;
+- if (oldclient != NULL) {
+- client->tcpattached = oldclient->tcpattached;
+- }
+-
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
++ mark_tcp_active(client, true);
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+@@ -3923,16 +3869,14 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &ns_g_server->tcpquota;
+- client->tcpattached = oldclient->tcpattached;
+
+ client->dscp = ifp->dscp;
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+ client->mortal = true;
+
+- pipeline_attach(oldclient, client);
+- client->pipelined = true;
++ tcpconn_attach(oldclient, client);
++ mark_tcp_active(client, true);
+
+ isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
+ isc_socket_attach(sock, &client->tcpsocket);
+@@ -3940,11 +3884,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
+ client->peeraddr_valid = true;
+
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
+-
+ INSIST(client->tcpmsg_valid == false);
+ dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
+ client->tcpmsg_valid = true;
+@@ -3970,8 +3909,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
+ MTRACE("createclients");
+
+ for (disp = 0; disp < n; disp++) {
+- result = get_client(manager, ifp, ifp->udpdispatch[disp],
+- NULL, tcp);
++ result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
+ if (result != ISC_R_SUCCESS)
+ break;
+ }
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index e2c40acd28..969ee4c08f 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -78,6 +78,13 @@
+ *** Types
+ ***/
+
++/*% reference-counted TCP connection object */
++typedef struct ns_tcpconn {
++ isc_refcount_t refs;
++ isc_quota_t *tcpquota;
++ bool pipelined;
++} ns_tcpconn_t;
++
+ /*% nameserver client structure */
+ struct ns_client {
+ unsigned int magic;
+@@ -131,10 +138,7 @@ struct ns_client {
+ dns_name_t signername; /*%< [T]SIG key name */
+ dns_name_t *signer; /*%< NULL if not valid sig */
+ bool mortal; /*%< Die after handling request */
+- bool pipelined; /*%< TCP queries not in sequence */
+- isc_refcount_t *pipeline_refs;
+- isc_quota_t *tcpquota;
+- bool tcpattached;
++ ns_tcpconn_t *tcpconn;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 61b08826a6..3535ef22a8 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -9,8 +9,6 @@
+ * information regarding copyright ownership.
+ */
+
+-/* $Id: interfacemgr.h,v 1.35 2011/07/28 23:47:58 tbox Exp $ */
+-
+ #ifndef NAMED_INTERFACEMGR_H
+ #define NAMED_INTERFACEMGR_H 1
+
+@@ -77,11 +75,11 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int ntcpaccepting; /*%< Number of clients
++ int32_t ntcpaccepting; /*%< Number of clients
+ ready to accept new
+ TCP connections on this
+ interface */
+- int ntcpactive; /*%< Number of clients
++ int32_t ntcpactive; /*%< Number of clients
+ servicing TCP queries
+ (whether accepting or
+ connected) */
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index 955096ef47..d9f6df5802 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -388,6 +388,7 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ */
+ ifp->ntcpaccepting = 0;
+ ifp->ntcpactive = 0;
++
+ ifp->nudpdispatch = 0;
+
+ ifp->dscp = -1;
+diff --git a/lib/isc/include/isc/quota.h b/lib/isc/include/isc/quota.h
+index b9bf59877a..36c5830242 100644
+--- a/lib/isc/include/isc/quota.h
++++ b/lib/isc/include/isc/quota.h
+@@ -100,6 +100,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p);
+ * quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA).
+ */
+
++isc_result_t
++isc_quota_force(isc_quota_t *quota, isc_quota_t **p);
++/*%<
++ * Like isc_quota_attach, but will attach '*p' to the quota
++ * even if the hard quota has been exceeded.
++ */
++
+ void
+ isc_quota_detach(isc_quota_t **p);
+ /*%<
+diff --git a/lib/isc/quota.c b/lib/isc/quota.c
+index 3ddff0d875..556a61f21d 100644
+--- a/lib/isc/quota.c
++++ b/lib/isc/quota.c
+@@ -74,20 +74,39 @@ isc_quota_release(isc_quota_t *quota) {
+ UNLOCK(&quota->lock);
+ }
+
+-isc_result_t
+-isc_quota_attach(isc_quota_t *quota, isc_quota_t **p)
+-{
++static isc_result_t
++doattach(isc_quota_t *quota, isc_quota_t **p, bool force) {
+ isc_result_t result;
+- INSIST(p != NULL && *p == NULL);
++ REQUIRE(p != NULL && *p == NULL);
++
+ result = isc_quota_reserve(quota);
+- if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA)
++ if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
++ *p = quota;
++ } else if (result == ISC_R_QUOTA && force) {
++ /* attach anyway */
++ LOCK(&quota->lock);
++ quota->used++;
++ UNLOCK(&quota->lock);
++
+ *p = quota;
++ result = ISC_R_SUCCESS;
++ }
++
+ return (result);
+ }
+
++isc_result_t
++isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
++ return (doattach(quota, p, false));
++}
++
++isc_result_t
++isc_quota_force(isc_quota_t *quota, isc_quota_t **p) {
++ return (doattach(quota, p, true));
++}
++
+ void
+-isc_quota_detach(isc_quota_t **p)
+-{
++isc_quota_detach(isc_quota_t **p) {
+ INSIST(p != NULL && *p != NULL);
+ isc_quota_release(*p);
+ *p = NULL;
+diff --git a/lib/isc/win32/libisc.def.in b/lib/isc/win32/libisc.def.in
+index a82facec0f..7b9f23d776 100644
+--- a/lib/isc/win32/libisc.def.in
++++ b/lib/isc/win32/libisc.def.in
+@@ -519,6 +519,7 @@ isc_portset_removerange
+ isc_quota_attach
+ isc_quota_destroy
+ isc_quota_detach
++isc_quota_force
+ isc_quota_init
+ isc_quota_max
+ isc_quota_release
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
new file mode 100644
index 0000000000..3821d18501
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
@@ -0,0 +1,80 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/59434b9]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 59434b987e8eb436b08c24e559ee094c4e939daa Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:26:19 -0700
+Subject: [PATCH 6/6] restore allowance for tcp-clients < interfaces
+
+in the "refactor tcpquota and pipeline refs" commit, the counting
+of active interfaces was tightened in such a way that named could
+fail to listen on an interface if there were more interfaces than
+tcp-clients. when checking the quota to start accepting on an
+interface, if the number of active clients was above zero, then
+it was presumed that some other client was able to handle accepting
+new connections. this, however, ignored the fact that the current client
+could be included in that count, so if the quota was already exceeded
+before all the interfaces were listening, some interfaces would never
+listen.
+
+we now check whether the current client has been marked active; if so,
+then the number of active clients on the interface must be greater
+than 1, not 0.
+
+(cherry picked from commit 0b4e2cd4c3192ba88569dd344f542a8cc43742b5)
+(cherry picked from commit d01023aaac35543daffbdf48464e320150235d41)
+---
+ bin/named/client.c | 8 +++++---
+ doc/arm/Bv9ARM-book.xml | 3 ++-
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index d826ab32bf..845326abc0 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -3464,8 +3464,9 @@ client_accept(ns_client_t *client) {
+ *
+ * So, we check here to see if any other clients are
+ * already servicing TCP queries on this interface (whether
+- * accepting, reading, or processing). If we find at least
+- * one, then it's okay *not* to call accept - we can let this
++ * accepting, reading, or processing). If we find that at
++ * least one client other than this one is active, then
++ * it's okay *not* to call accept - we can let this
+ * client go inactive and another will take over when it's
+ * done.
+ *
+@@ -3479,7 +3480,8 @@ client_accept(ns_client_t *client) {
+ * quota is tcp-clients plus the number of listening
+ * interfaces plus 1.)
+ */
+- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
++ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
++ (client->tcpactive ? 1 : 0));
+ if (exit) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
+ (void)exit_check(client);
+diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml
+index 381768d540..9c76d3cd6f 100644
+--- a/doc/arm/Bv9ARM-book.xml
++++ b/doc/arm/Bv9ARM-book.xml
+@@ -8493,7 +8493,8 @@ avoid-v6-udp-ports { 40000; range 50000 60000; };
+ <para>
+ The number of file descriptors reserved for TCP, stdio,
+ etc. This needs to be big enough to cover the number of
+- interfaces <command>named</command> listens on, <command>tcp-clients</command> as well as
++ interfaces <command>named</command> listens on plus
++ <command>tcp-clients</command>, as well as
+ to provide room for outgoing TCP queries and incoming zone
+ transfers. The default is <literal>512</literal>.
+ The minimum value is <literal>128</literal> and the
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
new file mode 100644
index 0000000000..1a84eca58a
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
@@ -0,0 +1,140 @@
+Backport commit to fix a compile error on arm caused by the commits that
+fix CVE-2018-5743.
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ef49780]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From ef49780d30d3ddc5735cfc32561b678a634fa72f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@sury.org>
+Date: Wed, 17 Apr 2019 15:22:27 +0200
+Subject: [PATCH] Replace atomic operations in bin/named/client.c with
+ isc_refcount reference counting
+
+---
+ bin/named/client.c | 18 +++++++-----------
+ bin/named/include/named/interfacemgr.h | 5 +++--
+ bin/named/interfacemgr.c | 7 +++++--
+ 3 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 845326abc0..29fecadca8 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -402,12 +402,10 @@ tcpconn_detach(ns_client_t *client) {
+ static void
+ mark_tcp_active(ns_client_t *client, bool active) {
+ if (active && !client->tcpactive) {
+- isc_atomic_xadd(&client->interface->ntcpactive, 1);
++ isc_refcount_increment0(&client->interface->ntcpactive, NULL);
+ client->tcpactive = active;
+ } else if (!active && client->tcpactive) {
+- uint32_t old =
+- isc_atomic_xadd(&client->interface->ntcpactive, -1);
+- INSIST(old > 0);
++ isc_refcount_decrement(&client->interface->ntcpactive, NULL);
+ client->tcpactive = active;
+ }
+ }
+@@ -554,7 +552,7 @@ exit_check(ns_client_t *client) {
+ if (client->mortal && TCP_CLIENT(client) &&
+ client->newstate != NS_CLIENTSTATE_FREED &&
+ !ns_g_clienttest &&
+- isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
++ isc_refcount_current(&client->interface->ntcpaccepting) == 0)
+ {
+ /* Nobody else is accepting */
+ client->mortal = false;
+@@ -3328,7 +3326,6 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ isc_result_t result;
+ ns_client_t *client = event->ev_arg;
+ isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
+- uint32_t old;
+
+ REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
+ REQUIRE(NS_CLIENT_VALID(client));
+@@ -3348,8 +3345,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+- old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
+- INSIST(old > 0);
++ isc_refcount_decrement(&client->interface->ntcpaccepting, NULL);
+
+ /*
+ * We must take ownership of the new socket before the exit
+@@ -3480,8 +3476,8 @@ client_accept(ns_client_t *client) {
+ * quota is tcp-clients plus the number of listening
+ * interfaces plus 1.)
+ */
+- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
+- (client->tcpactive ? 1 : 0));
++ exit = (isc_refcount_current(&client->interface->ntcpactive) >
++ (client->tcpactive ? 1U : 0U));
+ if (exit) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
+ (void)exit_check(client);
+@@ -3539,7 +3535,7 @@ client_accept(ns_client_t *client) {
+ * listening for connections itself to prevent the interface
+ * going dead.
+ */
+- isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
++ isc_refcount_increment0(&client->interface->ntcpaccepting, NULL);
+ }
+
+ static void
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 3535ef22a8..6e10f210fd 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -45,6 +45,7 @@
+ #include <isc/magic.h>
+ #include <isc/mem.h>
+ #include <isc/socket.h>
++#include <isc/refcount.h>
+
+ #include <dns/result.h>
+
+@@ -75,11 +76,11 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int32_t ntcpaccepting; /*%< Number of clients
++ isc_refcount_t ntcpaccepting; /*%< Number of clients
+ ready to accept new
+ TCP connections on this
+ interface */
+- int32_t ntcpactive; /*%< Number of clients
++ isc_refcount_t ntcpactive; /*%< Number of clients
+ servicing TCP queries
+ (whether accepting or
+ connected) */
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index d9f6df5802..135533be6b 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ * connections will be handled in parallel even though there is
+ * only one client initially.
+ */
+- ifp->ntcpaccepting = 0;
+- ifp->ntcpactive = 0;
++ isc_refcount_init(&ifp->ntcpaccepting, 0);
++ isc_refcount_init(&ifp->ntcpactive, 0);
+
+ ifp->nudpdispatch = 0;
+
+@@ -618,6 +618,9 @@ ns_interface_destroy(ns_interface_t *ifp) {
+
+ ns_interfacemgr_detach(&ifp->mgr);
+
++ isc_refcount_destroy(&ifp->ntcpactive);
++ isc_refcount_destroy(&ifp->ntcpaccepting);
++
+ ifp->magic = 0;
+ isc_mem_put(mctx, ifp, sizeof(*ifp));
+ }
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
index 69b1174073..3e2412dfa4 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
@@ -20,6 +20,14 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
file://0001-avoid-start-failure-with-bind-user.patch \
+ file://0001-bind-fix-CVE-2019-6471.patch \
+ file://0001-fix-enforcement-of-tcp-clients-v1.patch \
+ file://0002-tcp-clients-could-still-be-exceeded-v2.patch \
+ file://0003-use-reference-counter-for-pipeline-groups-v3.patch \
+ file://0004-better-tcpquota-accounting-and-client-mortality-chec.patch \
+ file://0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch \
+ file://0006-restore-allowance-for-tcp-clients-interfaces.patch \
+ file://0007-Replace-atomic-operations-in-bin-named-client.c-with.patch \
"
SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960"
@@ -133,7 +141,5 @@ PACKAGE_BEFORE_PN += "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-
FILES_python3-bind = "${sbindir}/dnssec-coverage ${sbindir}/dnssec-checkds \
${sbindir}/dnssec-keymgr ${PYTHON_SITEPACKAGES_DIR}"
-RDEPENDS_${PN} = "bash"
-RDEPENDS_${PN}-utils = "bash"
RDEPENDS_${PN}-dev = ""
RDEPENDS_python3-bind = "python3-core python3-ply"
diff --git a/meta/recipes-connectivity/bluez5/bluez5.inc b/meta/recipes-connectivity/bluez5/bluez5.inc
index 6c79ed0606..1702323288 100644
--- a/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -58,6 +58,7 @@ SRC_URI = "\
file://CVE-2018-10910.patch \
file://gcc9-fixes.patch \
file://0001-tools-Fix-build-after-y2038-changes-in-glibc.patch \
+ file://0001-tools-btpclient.c-include-signal.h.patch \
"
S = "${WORKDIR}/bluez-${PV}"
@@ -155,3 +156,5 @@ do_install_ptest() {
cp -r ${B}/unit/ ${D}${PTEST_PATH}
rm -f ${D}${PTEST_PATH}/unit/*.o
}
+
+RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-gconv-utf-16"
diff --git a/meta/recipes-connectivity/bluez5/bluez5/0001-Makefile.am-Fix-a-race-issue-for-tools.patch b/meta/recipes-connectivity/bluez5/bluez5/0001-Makefile.am-Fix-a-race-issue-for-tools.patch
index 3c227a8ea2..b6cb978393 100644
--- a/meta/recipes-connectivity/bluez5/bluez5/0001-Makefile.am-Fix-a-race-issue-for-tools.patch
+++ b/meta/recipes-connectivity/bluez5/bluez5/0001-Makefile.am-Fix-a-race-issue-for-tools.patch
@@ -1,32 +1,30 @@
-From 048e1844092cb4b3afd23f16fc2cc70dd2e122b7 Mon Sep 17 00:00:00 2001
-From: Robert Yang <liezhi.yang@windriver.com>
-Date: Mon, 24 Dec 2018 17:57:14 -0800
-Subject: [PATCH] Makefile.am: Fix a race issue for tools
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
-Fixed:
-cp ../bluez-5.50/tools/hid2hci.rules tools/97-hid2hci.rules
-cp: cannot create regular file tools/97-hid2hci.rules: No such file or directory
-make[1]: *** [tools/97-hid2hci.rules] Error 1
+From 117c41242c01e057295aed80ed973c6dc7e35fe2 Mon Sep 17 00:00:00 2001
+From: Ross Burton <ross.burton@intel.com>
+Date: Tue, 8 Oct 2019 11:01:56 +0100
+Subject: [PATCH BlueZ] Makefile.am: add missing mkdir in rules generation
-Upstream-Status: Submitted[https://www.spinics.net/lists/linux-bluetooth/msg78361.html]
-
-Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+In parallel out-of-tree builds it's possible that tools/*.rules are
+generated before the target directory has been implicitly created. Solve this by
+creating the directory before writing into it.
---
Makefile.am | 1 +
1 file changed, 1 insertion(+)
diff --git a/Makefile.am b/Makefile.am
-index 6d1ff11..35a01f2 100644
+index 2ac28b23d..e7bcd2366 100644
--- a/Makefile.am
+++ b/Makefile.am
-@@ -504,6 +504,7 @@ src/builtin.h: src/genbuiltin $(builtin_sources)
+@@ -589,6 +589,7 @@ src/builtin.h: src/genbuiltin $(builtin_sources)
$(AM_V_GEN)$(srcdir)/src/genbuiltin $(builtin_modules) > $@
tools/%.rules:
-+ [ -e tools ] || $(MKDIR_P) tools
++ $(AM_V_at)$(MKDIR_P) tools
$(AM_V_GEN)cp $(srcdir)/$(subst 97-,,$@) $@
$(lib_libbluetooth_la_OBJECTS): $(local_headers)
--
-2.10.2
+2.20.1
diff --git a/meta/recipes-connectivity/bluez5/bluez5/0001-tools-btpclient.c-include-signal.h.patch b/meta/recipes-connectivity/bluez5/bluez5/0001-tools-btpclient.c-include-signal.h.patch
new file mode 100644
index 0000000000..620aaabc68
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/0001-tools-btpclient.c-include-signal.h.patch
@@ -0,0 +1,30 @@
+From 0b1766514f6847c7367fce07f19a750ec74c11a6 Mon Sep 17 00:00:00 2001
+From: Robert Yang <liezhi.yang@windriver.com>
+Date: Thu, 26 Sep 2019 16:19:34 +0800
+Subject: [PATCH] tools/btpclient.c: include signal.h
+
+Fix compile failure when configured with --enable-btpclient:
+btpclient.c:2834:7: error: 'SIGINT' undeclared (first use in this function)
+
+Upstream-Status: Backport [A subset of the full fix that went upstream]
+
+Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+---
+ tools/btpclient.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/btpclient.c b/tools/btpclient.c
+index b217df5..aece7fe 100644
+--- a/tools/btpclient.c
++++ b/tools/btpclient.c
+@@ -29,6 +29,7 @@
+ #include <stdlib.h>
+ #include <assert.h>
+ #include <getopt.h>
++#include <signal.h>
+
+ #include <ell/ell.h>
+
+--
+2.7.4
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2018-10910.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2018-10910.patch
index b4b1846c45..2a78077443 100644
--- a/meta/recipes-connectivity/bluez5/bluez5/CVE-2018-10910.patch
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2018-10910.patch
@@ -1,3 +1,8 @@
+From 977321f2c7f974ea68a3d90df296c66189a3f254 Mon Sep 17 00:00:00 2001
+From: Lei Maohui <leimaohui@cn.fujitsu.com>
+Date: Fri, 21 Jun 2019 17:57:35 +0900
+Subject: [PATCH] CVE-2018-10910
+
A bug in Bluez may allow for the Bluetooth Discoverable state being set to on
when no Bluetooth agent is registered with the system. This situation could
lead to the unauthorized pairing of certain Bluetooth devices without any
@@ -21,14 +26,24 @@ DiscoverableTimeout property:
[bluetooth]# discoverable-timeout 180
Changing discoverable-timeout 180 succeeded
---
- client/main.c | 43 +++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
+ client/main.c | 82 +++++++++++++++++++++++++++++++++-
+ doc/adapter-api.txt | 6 +++
+ src/adapter.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++------
+ 3 files changed, 198 insertions(+), 15 deletions(-)
diff --git a/client/main.c b/client/main.c
-index 87323d8f7..59820c6d9 100644
+index 87323d8..1a66a3a 100644
--- a/client/main.c
+++ b/client/main.c
-@@ -1061,6 +1061,47 @@ static void cmd_discoverable(int argc, char *argv[])
+@@ -877,6 +877,7 @@ static void cmd_show(int argc, char *argv[])
+ print_property(proxy, "Class");
+ print_property(proxy, "Powered");
+ print_property(proxy, "Discoverable");
++ print_property(proxy, "DiscoverableTimeout");
+ print_property(proxy, "Pairable");
+ print_uuids(proxy);
+ print_property(proxy, "Modalias");
+@@ -1061,6 +1062,47 @@ static void cmd_discoverable(int argc, char *argv[])
return bt_shell_noninteractive_quit(EXIT_FAILURE);
}
@@ -76,7 +91,87 @@ index 87323d8f7..59820c6d9 100644
static void cmd_agent(int argc, char *argv[])
{
dbus_bool_t enable;
-@@ -2549,6 +2590,8 @@ static const struct bt_shell_menu main_menu = {
+@@ -1124,6 +1166,7 @@ static struct set_discovery_filter_args {
+ char **uuids;
+ size_t uuids_len;
+ dbus_bool_t duplicate;
++ dbus_bool_t discoverable;
+ bool set;
+ } filter = {
+ .rssi = DISTANCE_VAL_INVALID,
+@@ -1163,6 +1206,11 @@ static void set_discovery_filter_setup(DBusMessageIter *iter, void *user_data)
+ DBUS_TYPE_BOOLEAN,
+ &args->duplicate);
+
++ if (args->discoverable)
++ g_dbus_dict_append_entry(&dict, "Discoverable",
++ DBUS_TYPE_BOOLEAN,
++ &args->discoverable);
++
+ dbus_message_iter_close_container(iter, &dict);
+ }
+
+@@ -1320,6 +1368,26 @@ static void cmd_scan_filter_duplicate_data(int argc, char *argv[])
+ filter.set = false;
+ }
+
++static void cmd_scan_filter_discoverable(int argc, char *argv[])
++{
++ if (argc < 2 || !strlen(argv[1])) {
++ bt_shell_printf("Discoverable: %s\n",
++ filter.discoverable ? "on" : "off");
++ return bt_shell_noninteractive_quit(EXIT_SUCCESS);
++ }
++
++ if (!strcmp(argv[1], "on"))
++ filter.discoverable = true;
++ else if (!strcmp(argv[1], "off"))
++ filter.discoverable = false;
++ else {
++ bt_shell_printf("Invalid option: %s\n", argv[1]);
++ return bt_shell_noninteractive_quit(EXIT_FAILURE);
++ }
++
++ filter.set = false;
++}
++
+ static void filter_clear_uuids(void)
+ {
+ g_strfreev(filter.uuids);
+@@ -1348,6 +1416,11 @@ static void filter_clear_duplicate(void)
+ filter.duplicate = false;
+ }
+
++static void filter_clear_discoverable(void)
++{
++ filter.discoverable = false;
++}
++
+ struct clear_entry {
+ const char *name;
+ void (*clear) (void);
+@@ -1359,6 +1432,7 @@ static const struct clear_entry filter_clear[] = {
+ { "pathloss", filter_clear_pathloss },
+ { "transport", filter_clear_transport },
+ { "duplicate-data", filter_clear_duplicate },
++ { "discoverable", filter_clear_discoverable },
+ {}
+ };
+
+@@ -2468,7 +2542,11 @@ static const struct bt_shell_menu scan_menu = {
+ { "duplicate-data", "[on/off]", cmd_scan_filter_duplicate_data,
+ "Set/Get duplicate data filter",
+ NULL },
+- { "clear", "[uuids/rssi/pathloss/transport/duplicate-data]",
++ { "discoverable", "[on/off]", cmd_scan_filter_discoverable,
++ "Set/Get discoverable filter",
++ NULL },
++ { "clear",
++ "[uuids/rssi/pathloss/transport/duplicate-data/discoverable]",
+ cmd_scan_filter_clear,
+ "Clears discovery filter.",
+ filter_clear_generator },
+@@ -2549,6 +2627,8 @@ static const struct bt_shell_menu main_menu = {
{ "discoverable", "<on/off>", cmd_discoverable,
"Set controller discoverable mode",
NULL },
@@ -85,74 +180,36 @@ index 87323d8f7..59820c6d9 100644
{ "agent", "<on/off/capability>", cmd_agent,
"Enable/disable agent with given capability",
capability_generator},
---
-2.17.1
-
-Subject: [PATCH BlueZ 2/4] client: Make show command print DiscoverableTimeout
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-25 10:20:33
-Message-ID: 20180725102035.19439-2-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-Controller XX:XX:XX:XX:XX:XX (public)
- Name: Vudentz's T460s
- Alias: Intel-1
- Class: 0x004c010c
- Powered: yes
- Discoverable: no
- DiscoverableTimeout: 0x00000000
- Pairable: yes
- UUID: Headset AG (00001112-0000-1000-8000-00805f9b34fb)
- UUID: Generic Attribute Profile (00001801-0000-1000-8000-00805f9b34fb)
- UUID: A/V Remote Control (0000110e-0000-1000-8000-00805f9b34fb)
- UUID: SIM Access (0000112d-0000-1000-8000-00805f9b34fb)
- UUID: Generic Access Profile (00001800-0000-1000-8000-00805f9b34fb)
- UUID: PnP Information (00001200-0000-1000-8000-00805f9b34fb)
- UUID: A/V Remote Control Target (0000110c-0000-1000-8000-00805f9b34fb)
- UUID: Audio Source (0000110a-0000-1000-8000-00805f9b34fb)
- UUID: Audio Sink (0000110b-0000-1000-8000-00805f9b34fb)
- UUID: Headset (00001108-0000-1000-8000-00805f9b34fb)
- Modalias: usb:v1D6Bp0246d0532
- Discovering: no
----
- client/main.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/client/main.c b/client/main.c
-index 59820c6d9..6f472d050 100644
---- a/client/main.c
-+++ b/client/main.c
-@@ -877,6 +877,7 @@ static void cmd_show(int argc, char *argv[])
- print_property(proxy, "Class");
- print_property(proxy, "Powered");
- print_property(proxy, "Discoverable");
-+ print_property(proxy, "DiscoverableTimeout");
- print_property(proxy, "Pairable");
- print_uuids(proxy);
- print_property(proxy, "Modalias");
---
-2.17.1
-Subject: [PATCH BlueZ 3/4] adapter: Track pending settings
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-25 10:20:34
-Message-ID: 20180725102035.19439-3-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This tracks settings being changed and in case the settings is already
-pending considered it to be done.
----
- src/adapter.c | 30 ++++++++++++++++++++++++++++--
- 1 file changed, 28 insertions(+), 2 deletions(-)
-
+diff --git a/doc/adapter-api.txt b/doc/adapter-api.txt
+index d14d0ca..4791af2 100644
+--- a/doc/adapter-api.txt
++++ b/doc/adapter-api.txt
+@@ -113,6 +113,12 @@ Methods void StartDiscovery()
+ generated for either ManufacturerData and
+ ServiceData everytime they are discovered.
+
++ bool Discoverable (Default: false)
++
++ Make adapter discoverable while discovering,
++ if the adapter is already discoverable this
++ setting this filter won't do anything.
++
+ When discovery filter is set, Device objects will be
+ created as new devices with matching criteria are
+ discovered regardless of they are connectable or
diff --git a/src/adapter.c b/src/adapter.c
-index af340fd6e..20c20f9e9 100644
+index af340fd..822bd34 100644
--- a/src/adapter.c
+++ b/src/adapter.c
-@@ -196,6 +196,7 @@ struct btd_adapter {
+@@ -157,6 +157,7 @@ struct discovery_filter {
+ int16_t rssi;
+ GSList *uuids;
+ bool duplicate;
++ bool discoverable;
+ };
+
+ struct watch_client {
+@@ -196,6 +197,7 @@ struct btd_adapter {
char *name; /* controller device name */
char *short_name; /* controller short name */
uint32_t supported_settings; /* controller supported settings */
@@ -160,7 +217,15 @@ index af340fd6e..20c20f9e9 100644
uint32_t current_settings; /* current controller settings */
char *path; /* adapter object path */
-@@ -509,8 +510,10 @@ static void settings_changed(struct btd_adapter *adapter, uint32_t settings)
+@@ -213,6 +215,7 @@ struct btd_adapter {
+
+ bool discovering; /* discovering property state */
+ bool filtered_discovery; /* we are doing filtered discovery */
++ bool filtered_discoverable; /* we are doing filtered discovery */
+ bool no_scan_restart_delay; /* when this flag is set, restart scan
+ * without delay */
+ uint8_t discovery_type; /* current active discovery type */
+@@ -509,8 +512,10 @@ static void settings_changed(struct btd_adapter *adapter, uint32_t settings)
changed_mask = adapter->current_settings ^ settings;
adapter->current_settings = settings;
@@ -171,7 +236,7 @@ index af340fd6e..20c20f9e9 100644
if (changed_mask & MGMT_SETTING_POWERED) {
g_dbus_emit_property_changed(dbus_conn, adapter->path,
-@@ -596,10 +599,31 @@ static bool set_mode(struct btd_adapter *adapter, uint16_t opcode,
+@@ -596,10 +601,31 @@ static bool set_mode(struct btd_adapter *adapter, uint16_t opcode,
uint8_t mode)
{
struct mgmt_mode cp;
@@ -203,160 +268,11 @@ index af340fd6e..20c20f9e9 100644
DBG("sending set mode command for index %u", adapter->dev_id);
if (mgmt_send(adapter->mgmt, opcode,
-@@ -2739,13 +2763,15 @@ static void property_set_mode(struct btd_adapter *adapter, uint32_t setting,
- else
- current_enable = FALSE;
-
-- if (enable == current_enable) {
-+ if (enable == current_enable || adapter->pending_settings & setting) {
- g_dbus_pending_property_success(id);
- return;
- }
-
- mode = (enable == TRUE) ? 0x01 : 0x00;
-
-+ adapter->pending_settings |= setting;
-+
- switch (setting) {
- case MGMT_SETTING_POWERED:
- opcode = MGMT_OP_SET_POWERED;
-@@ -2798,7 +2824,7 @@ static void property_set_mode(struct btd_adapter *adapter, uint32_t setting,
- data->id = id;
-
- if (mgmt_send(adapter->mgmt, opcode, adapter->dev_id, len, param,
-- property_set_mode_complete, data, g_free) > 0)
-+ property_set_mode_complete, data, g_free) > 0)
- return;
-
- g_free(data);
---
-2.17.1
-Subject: [PATCH BlueZ 4/4] adapter: Check pending when setting DiscoverableTimeout
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-25 10:20:35
-Message-ID: 20180725102035.19439-4-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This makes DiscoverableTimeout check if discoverable is already pending
-and don't attempt to set it once again which may cause discoverable to
-be re-enabled when in fact the application just want to set the timeout
-alone.
----
- src/adapter.c | 14 +++++++++++++-
- 1 file changed, 13 insertions(+), 1 deletion(-)
-
-diff --git a/src/adapter.c b/src/adapter.c
-index 20c20f9e9..f92c897c7 100644
---- a/src/adapter.c
-+++ b/src/adapter.c
-@@ -2901,6 +2901,7 @@ static void property_set_discoverable_timeout(
- GDBusPendingPropertySet id, void *user_data)
- {
- struct btd_adapter *adapter = user_data;
-+ bool enabled;
- dbus_uint32_t value;
-
- dbus_message_iter_get_basic(iter, &value);
-@@ -2914,8 +2915,19 @@ static void property_set_discoverable_timeout(
- g_dbus_emit_property_changed(dbus_conn, adapter->path,
- ADAPTER_INTERFACE, "DiscoverableTimeout");
-
-+ if (adapter->pending_settings & MGMT_SETTING_DISCOVERABLE) {
-+ if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
-+ enabled = false;
-+ else
-+ enabled = true;
-+ } else {
-+ if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
-+ enabled = true;
-+ else
-+ enabled = false;
-+ }
-
-- if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
-+ if (enabled)
- set_discoverable(adapter, 0x01, adapter->discoverable_timeout);
- }
-
---
-2.17.1
-Subject: [PATCH BlueZ 1/5] doc/adapter-api: Add Discoverable option to SetDiscoveryFilter
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-26 14:17:19
-Message-ID: 20180726141723.20199-1-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This enables the client to set its discoverable setting while
-discovering which is very typical situation as usually the setings
-application would allow incoming pairing request while scanning, so
-this would reduce the number of calls setting Discoverable and
-DiscoverableTimeout and restoring after done with discovery.
----
- doc/adapter-api.txt | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/doc/adapter-api.txt b/doc/adapter-api.txt
-index d14d0ca50..4791af2c7 100644
---- a/doc/adapter-api.txt
-+++ b/doc/adapter-api.txt
-@@ -113,6 +113,12 @@ Methods void StartDiscovery()
- generated for either ManufacturerData and
- ServiceData everytime they are discovered.
-
-+ bool Discoverable (Default: false)
-+
-+ Make adapter discoverable while discovering,
-+ if the adapter is already discoverable this
-+ setting this filter won't do anything.
-+
- When discovery filter is set, Device objects will be
- created as new devices with matching criteria are
- discovered regardless of they are connectable or
---
-2.17.1
-Subject: [PATCH BlueZ 2/5] adapter: Discovery filter discoverable
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-26 14:17:20
-Message-ID: 20180726141723.20199-2-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This implements the discovery filter discoverable and tracks which
-clients had enabled it and restores the settings when the last client
-enabling it exits.
----
- src/adapter.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 54 insertions(+), 2 deletions(-)
-
-diff --git a/src/adapter.c b/src/adapter.c
-index f92c897c7..bd9edddc6 100644
---- a/src/adapter.c
-+++ b/src/adapter.c
-@@ -157,6 +157,7 @@ struct discovery_filter {
- int16_t rssi;
- GSList *uuids;
- bool duplicate;
-+ bool discoverable;
- };
-
- struct watch_client {
-@@ -214,6 +215,7 @@ struct btd_adapter {
-
- bool discovering; /* discovering property state */
- bool filtered_discovery; /* we are doing filtered discovery */
-+ bool filtered_discoverable; /* we are doing filtered discovery */
- bool no_scan_restart_delay; /* when this flag is set, restart scan
- * without delay */
- uint8_t discovery_type; /* current active discovery type */
-@@ -1842,6 +1844,16 @@ static void discovery_free(void *user_data)
+@@ -1818,7 +1844,17 @@ static void discovery_free(void *user_data)
g_free(client);
}
+-static void discovery_remove(struct watch_client *client)
+static bool set_filtered_discoverable(struct btd_adapter *adapter, bool enable)
+{
+ if (adapter->filtered_discoverable == enable)
@@ -367,13 +283,15 @@ index f92c897c7..bd9edddc6 100644
+ return set_discoverable(adapter, enable, 0);
+}
+
- static void discovery_remove(struct watch_client *client)
++static void discovery_remove(struct watch_client *client, bool exit)
{
struct btd_adapter *adapter = client->adapter;
-@@ -1854,6 +1866,22 @@ static void discovery_remove(struct watch_client *client)
+
+@@ -1830,7 +1866,27 @@ static void discovery_remove(struct watch_client *client)
adapter->discovery_list = g_slist_remove(adapter->discovery_list,
client);
+- discovery_free(client);
+ if (adapter->filtered_discoverable &&
+ client->discovery_filter->discoverable) {
+ GSList *l;
@@ -390,10 +308,72 @@ index f92c897c7..bd9edddc6 100644
+ set_filtered_discoverable(adapter, false);
+ }
+
- discovery_free(client);
++ if (!exit && client->discovery_filter)
++ adapter->set_filter_list = g_slist_prepend(
++ adapter->set_filter_list, client);
++ else
++ discovery_free(client);
/*
-@@ -2224,6 +2252,15 @@ static DBusMessage *start_discovery(DBusConnection *conn,
+ * If there are other client discoveries in progress, then leave
+@@ -1859,8 +1915,11 @@ static void stop_discovery_complete(uint8_t status, uint16_t length,
+ goto done;
+ }
+
+- if (client->msg)
++ if (client->msg) {
+ g_dbus_send_reply(dbus_conn, client->msg, DBUS_TYPE_INVALID);
++ dbus_message_unref(client->msg);
++ client->msg = NULL;
++ }
+
+ adapter->discovery_type = 0x00;
+ adapter->discovery_enable = 0x00;
+@@ -1873,7 +1932,7 @@ static void stop_discovery_complete(uint8_t status, uint16_t length,
+ trigger_passive_scanning(adapter);
+
+ done:
+- discovery_remove(client);
++ discovery_remove(client, false);
+ }
+
+ static int compare_sender(gconstpointer a, gconstpointer b)
+@@ -2094,14 +2153,14 @@ static int update_discovery_filter(struct btd_adapter *adapter)
+ return -EINPROGRESS;
+ }
+
+-static int discovery_stop(struct watch_client *client)
++static int discovery_stop(struct watch_client *client, bool exit)
+ {
+ struct btd_adapter *adapter = client->adapter;
+ struct mgmt_cp_stop_discovery cp;
+
+ /* Check if there are more client discovering */
+ if (g_slist_next(adapter->discovery_list)) {
+- discovery_remove(client);
++ discovery_remove(client, exit);
+ update_discovery_filter(adapter);
+ return 0;
+ }
+@@ -2111,7 +2170,7 @@ static int discovery_stop(struct watch_client *client)
+ * and so it is enough to send out the signal and just return.
+ */
+ if (adapter->discovery_enable == 0x00) {
+- discovery_remove(client);
++ discovery_remove(client, exit);
+ adapter->discovering = false;
+ g_dbus_emit_property_changed(dbus_conn, adapter->path,
+ ADAPTER_INTERFACE, "Discovering");
+@@ -2136,7 +2195,7 @@ static void discovery_disconnect(DBusConnection *conn, void *user_data)
+
+ DBG("owner %s", client->owner);
+
+- discovery_stop(client);
++ discovery_stop(client, true);
+ }
+
+ /*
+@@ -2200,6 +2259,15 @@ static DBusMessage *start_discovery(DBusConnection *conn,
adapter->set_filter_list, client);
adapter->discovery_list = g_slist_prepend(
adapter->discovery_list, client);
@@ -409,7 +389,7 @@ index f92c897c7..bd9edddc6 100644
goto done;
}
-@@ -2348,6 +2385,17 @@ static bool parse_duplicate_data(DBusMessageIter *value,
+@@ -2324,6 +2392,17 @@ static bool parse_duplicate_data(DBusMessageIter *value,
return true;
}
@@ -427,7 +407,7 @@ index f92c897c7..bd9edddc6 100644
struct filter_parser {
const char *name;
bool (*func)(DBusMessageIter *iter, struct discovery_filter *filter);
-@@ -2357,6 +2405,7 @@ struct filter_parser {
+@@ -2333,6 +2412,7 @@ struct filter_parser {
{ "Pathloss", parse_pathloss },
{ "Transport", parse_transport },
{ "DuplicateData", parse_duplicate_data },
@@ -435,7 +415,7 @@ index f92c897c7..bd9edddc6 100644
{ }
};
-@@ -2396,6 +2445,7 @@ static bool parse_discovery_filter_dict(struct btd_adapter *adapter,
+@@ -2372,6 +2452,7 @@ static bool parse_discovery_filter_dict(struct btd_adapter *adapter,
(*filter)->rssi = DISTANCE_VAL_INVALID;
(*filter)->type = get_scan_type(adapter);
(*filter)->duplicate = false;
@@ -443,7 +423,7 @@ index f92c897c7..bd9edddc6 100644
dbus_message_iter_init(msg, &iter);
if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_ARRAY ||
-@@ -2441,8 +2491,10 @@ static bool parse_discovery_filter_dict(struct btd_adapter *adapter,
+@@ -2417,8 +2498,10 @@ static bool parse_discovery_filter_dict(struct btd_adapter *adapter,
goto invalid_args;
DBG("filtered discovery params: transport: %d rssi: %d pathloss: %d "
@@ -456,250 +436,70 @@ index f92c897c7..bd9edddc6 100644
return true;
---
-2.17.1
-Subject: [PATCH BlueZ 3/5] client: Add scan.discoverable command
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-26 14:17:21
-Message-ID: 20180726141723.20199-3-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This adds discoverable command to scan menu which can be used to set
-if adapter should become discoverable while scanning:
-
-[bluetooth]# scan.discoverable on
-[bluetooth]# scan on
-SetDiscoveryFilter success
-[CHG] Controller XX:XX:XX:XX:XX:XX Discoverable: yes
-Discovery started
-[CHG] Controller XX:XX:XX:XX:XX:XX Discovering: yes
-[bluetooth]# scan off
-Discovery stopped
-[CHG] Controller XX:XX:XX:XX:XX:XX Discoverable: no
----
- client/main.c | 29 +++++++++++++++++++++++++++++
- 1 file changed, 29 insertions(+)
-
-diff --git a/client/main.c b/client/main.c
-index 6f472d050..6e6f6d2fb 100644
---- a/client/main.c
-+++ b/client/main.c
-@@ -1166,6 +1166,7 @@ static struct set_discovery_filter_args {
- char **uuids;
- size_t uuids_len;
- dbus_bool_t duplicate;
-+ dbus_bool_t discoverable;
- bool set;
- } filter = {
- .rssi = DISTANCE_VAL_INVALID,
-@@ -1205,6 +1206,11 @@ static void set_discovery_filter_setup(DBusMessageIter *iter, void *user_data)
- DBUS_TYPE_BOOLEAN,
- &args->duplicate);
+@@ -2510,7 +2593,7 @@ static DBusMessage *stop_discovery(DBusConnection *conn,
+ if (client->msg)
+ return btd_error_busy(msg);
-+ if (args->discoverable)
-+ g_dbus_dict_append_entry(&dict, "Discoverable",
-+ DBUS_TYPE_BOOLEAN,
-+ &args->discoverable);
-+
- dbus_message_iter_close_container(iter, &dict);
- }
+- err = discovery_stop(client);
++ err = discovery_stop(client, false);
+ switch (err) {
+ case 0:
+ return dbus_message_new_method_return(msg);
+@@ -2739,13 +2822,15 @@ static void property_set_mode(struct btd_adapter *adapter, uint32_t setting,
+ else
+ current_enable = FALSE;
-@@ -1362,6 +1368,26 @@ static void cmd_scan_filter_duplicate_data(int argc, char *argv[])
- filter.set = false;
- }
+- if (enable == current_enable) {
++ if (enable == current_enable || adapter->pending_settings & setting) {
+ g_dbus_pending_property_success(id);
+ return;
+ }
-+static void cmd_scan_filter_discoverable(int argc, char *argv[])
-+{
-+ if (argc < 2 || !strlen(argv[1])) {
-+ bt_shell_printf("Discoverable: %s\n",
-+ filter.discoverable ? "on" : "off");
-+ return bt_shell_noninteractive_quit(EXIT_SUCCESS);
-+ }
-+
-+ if (!strcmp(argv[1], "on"))
-+ filter.discoverable = true;
-+ else if (!strcmp(argv[1], "off"))
-+ filter.discoverable = false;
-+ else {
-+ bt_shell_printf("Invalid option: %s\n", argv[1]);
-+ return bt_shell_noninteractive_quit(EXIT_FAILURE);
-+ }
-+
-+ filter.set = false;
-+}
-+
- static void filter_clear_uuids(void)
- {
- g_strfreev(filter.uuids);
-@@ -2510,6 +2536,9 @@ static const struct bt_shell_menu scan_menu = {
- { "duplicate-data", "[on/off]", cmd_scan_filter_duplicate_data,
- "Set/Get duplicate data filter",
- NULL },
-+ { "discoverable", "[on/off]", cmd_scan_filter_discoverable,
-+ "Set/Get discoverable filter",
-+ NULL },
- { "clear", "[uuids/rssi/pathloss/transport/duplicate-data]",
- cmd_scan_filter_clear,
- "Clears discovery filter.",
---
-2.17.1
-Subject: [PATCH BlueZ 4/5] client: Add scan.clear discoverable
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-26 14:17:22
-Message-ID: 20180726141723.20199-4-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-This implements scan.clear for discoverable filter.
----
- client/main.c | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/client/main.c b/client/main.c
-index 6e6f6d2fb..1a66a3ab4 100644
---- a/client/main.c
-+++ b/client/main.c
-@@ -1416,6 +1416,11 @@ static void filter_clear_duplicate(void)
- filter.duplicate = false;
- }
+ mode = (enable == TRUE) ? 0x01 : 0x00;
-+static void filter_clear_discoverable(void)
-+{
-+ filter.discoverable = false;
-+}
++ adapter->pending_settings |= setting;
+
- struct clear_entry {
- const char *name;
- void (*clear) (void);
-@@ -1427,6 +1432,7 @@ static const struct clear_entry filter_clear[] = {
- { "pathloss", filter_clear_pathloss },
- { "transport", filter_clear_transport },
- { "duplicate-data", filter_clear_duplicate },
-+ { "discoverable", filter_clear_discoverable },
- {}
- };
+ switch (setting) {
+ case MGMT_SETTING_POWERED:
+ opcode = MGMT_OP_SET_POWERED;
+@@ -2798,7 +2883,7 @@ static void property_set_mode(struct btd_adapter *adapter, uint32_t setting,
+ data->id = id;
-@@ -2539,7 +2545,8 @@ static const struct bt_shell_menu scan_menu = {
- { "discoverable", "[on/off]", cmd_scan_filter_discoverable,
- "Set/Get discoverable filter",
- NULL },
-- { "clear", "[uuids/rssi/pathloss/transport/duplicate-data]",
-+ { "clear",
-+ "[uuids/rssi/pathloss/transport/duplicate-data/discoverable]",
- cmd_scan_filter_clear,
- "Clears discovery filter.",
- filter_clear_generator },
---
-2.17.1
-Subject: [PATCH BlueZ 5/5] adapter: Fix not keeping discovery filters
-From: Luiz Augusto von Dentz <luiz.dentz () gmail ! com>
-Date: 2018-07-26 14:17:23
-Message-ID: 20180726141723.20199-5-luiz.dentz () gmail ! com
-[Download RAW message or body]
-
-From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
-
-If the discovery has been stopped and the client has set filters those
-should be put back into filter list since the client may still be
-interested in using them the next time it start a scanning.
----
- src/adapter.c | 25 ++++++++++++++++---------
- 1 file changed, 16 insertions(+), 9 deletions(-)
-
-diff --git a/src/adapter.c b/src/adapter.c
-index bd9edddc6..822bd3472 100644
---- a/src/adapter.c
-+++ b/src/adapter.c
-@@ -1854,7 +1854,7 @@ static bool set_filtered_discoverable(struct btd_adapter *adapter, bool enable)
- return set_discoverable(adapter, enable, 0);
- }
+ if (mgmt_send(adapter->mgmt, opcode, adapter->dev_id, len, param,
+- property_set_mode_complete, data, g_free) > 0)
++ property_set_mode_complete, data, g_free) > 0)
+ return;
--static void discovery_remove(struct watch_client *client)
-+static void discovery_remove(struct watch_client *client, bool exit)
+ g_free(data);
+@@ -2875,6 +2960,7 @@ static void property_set_discoverable_timeout(
+ GDBusPendingPropertySet id, void *user_data)
{
- struct btd_adapter *adapter = client->adapter;
-
-@@ -1882,7 +1882,11 @@ static void discovery_remove(struct watch_client *client)
- set_filtered_discoverable(adapter, false);
- }
-
-- discovery_free(client);
-+ if (!exit && client->discovery_filter)
-+ adapter->set_filter_list = g_slist_prepend(
-+ adapter->set_filter_list, client);
-+ else
-+ discovery_free(client);
+ struct btd_adapter *adapter = user_data;
++ bool enabled;
+ dbus_uint32_t value;
- /*
- * If there are other client discoveries in progress, then leave
-@@ -1911,8 +1915,11 @@ static void stop_discovery_complete(uint8_t status, uint16_t length,
- goto done;
- }
+ dbus_message_iter_get_basic(iter, &value);
+@@ -2888,8 +2974,19 @@ static void property_set_discoverable_timeout(
+ g_dbus_emit_property_changed(dbus_conn, adapter->path,
+ ADAPTER_INTERFACE, "DiscoverableTimeout");
-- if (client->msg)
-+ if (client->msg) {
- g_dbus_send_reply(dbus_conn, client->msg, DBUS_TYPE_INVALID);
-+ dbus_message_unref(client->msg);
-+ client->msg = NULL;
++ if (adapter->pending_settings & MGMT_SETTING_DISCOVERABLE) {
++ if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
++ enabled = false;
++ else
++ enabled = true;
++ } else {
++ if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
++ enabled = true;
++ else
++ enabled = false;
+ }
- adapter->discovery_type = 0x00;
- adapter->discovery_enable = 0x00;
-@@ -1925,7 +1932,7 @@ static void stop_discovery_complete(uint8_t status, uint16_t length,
- trigger_passive_scanning(adapter);
-
- done:
-- discovery_remove(client);
-+ discovery_remove(client, false);
- }
-
- static int compare_sender(gconstpointer a, gconstpointer b)
-@@ -2146,14 +2153,14 @@ static int update_discovery_filter(struct btd_adapter *adapter)
- return -EINPROGRESS;
- }
-
--static int discovery_stop(struct watch_client *client)
-+static int discovery_stop(struct watch_client *client, bool exit)
- {
- struct btd_adapter *adapter = client->adapter;
- struct mgmt_cp_stop_discovery cp;
-
- /* Check if there are more client discovering */
- if (g_slist_next(adapter->discovery_list)) {
-- discovery_remove(client);
-+ discovery_remove(client, exit);
- update_discovery_filter(adapter);
- return 0;
- }
-@@ -2163,7 +2170,7 @@ static int discovery_stop(struct watch_client *client)
- * and so it is enough to send out the signal and just return.
- */
- if (adapter->discovery_enable == 0x00) {
-- discovery_remove(client);
-+ discovery_remove(client, exit);
- adapter->discovering = false;
- g_dbus_emit_property_changed(dbus_conn, adapter->path,
- ADAPTER_INTERFACE, "Discovering");
-@@ -2188,7 +2195,7 @@ static void discovery_disconnect(DBusConnection *conn, void *user_data)
-
- DBG("owner %s", client->owner);
-
-- discovery_stop(client);
-+ discovery_stop(client, true);
+- if (adapter->current_settings & MGMT_SETTING_DISCOVERABLE)
++ if (enabled)
+ set_discoverable(adapter, 0x01, adapter->discoverable_timeout);
}
- /*
-@@ -2586,7 +2593,7 @@ static DBusMessage *stop_discovery(DBusConnection *conn,
- if (client->msg)
- return btd_error_busy(msg);
-
-- err = discovery_stop(client);
-+ err = discovery_stop(client, false);
- switch (err) {
- case 0:
- return dbus_message_new_method_return(msg);
--
-2.17.1
+2.7.4
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/out-of-tree.patch b/meta/recipes-connectivity/bluez5/bluez5/out-of-tree.patch
index 3ee79d7047..76ed779258 100644
--- a/meta/recipes-connectivity/bluez5/bluez5/out-of-tree.patch
+++ b/meta/recipes-connectivity/bluez5/bluez5/out-of-tree.patch
@@ -7,7 +7,7 @@ In parallel out-of-tree builds it's possible that obexd/src/builtin.h is
generated before the target directory has been implicitly created. Solve this by
creating the directory before writing into it.
-Upstream-Status: Submitted
+Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@intel.com>
---
Makefile.obexd | 1 +
diff --git a/meta/recipes-connectivity/connman/connman.inc b/meta/recipes-connectivity/connman/connman.inc
index ee00479926..fb38ab4fc1 100644
--- a/meta/recipes-connectivity/connman/connman.inc
+++ b/meta/recipes-connectivity/connman/connman.inc
@@ -59,7 +59,7 @@ INITSCRIPT_NAME = "connman"
INITSCRIPT_PARAMS = "start 05 5 2 3 . stop 22 0 1 6 ."
python __anonymous () {
- systemd_packages = "${PN}"
+ systemd_packages = "${PN} ${PN}-wait-online"
pkgconfig = d.getVar('PACKAGECONFIG')
if ('openvpn' or 'vpnc' or 'l2tp' or 'pptp') in pkgconfig.split():
systemd_packages += " ${PN}-vpn"
diff --git a/meta/recipes-connectivity/dhcp/dhcp.inc b/meta/recipes-connectivity/dhcp/dhcp.inc
index 18bbaf8841..c4697beaf1 100644
--- a/meta/recipes-connectivity/dhcp/dhcp.inc
+++ b/meta/recipes-connectivity/dhcp/dhcp.inc
@@ -43,7 +43,7 @@ INITSCRIPT_PACKAGES = "dhcp-server"
INITSCRIPT_NAME_dhcp-server = "dhcp-server"
INITSCRIPT_PARAMS_dhcp-server = "defaults"
-TARGET_CFLAGS += "-D_GNU_SOURCE"
+CFLAGS += "-D_GNU_SOURCE"
EXTRA_OECONF = "--with-srv-lease-file=${localstatedir}/lib/dhcp/dhcpd.leases \
--with-srv6-lease-file=${localstatedir}/lib/dhcp/dhcpd6.leases \
--with-cli-lease-file=${localstatedir}/lib/dhcp/dhclient.leases \
diff --git a/meta/recipes-connectivity/iproute2/iproute2.inc b/meta/recipes-connectivity/iproute2/iproute2.inc
index d72871767f..fc31b8444e 100644
--- a/meta/recipes-connectivity/iproute2/iproute2.inc
+++ b/meta/recipes-connectivity/iproute2/iproute2.inc
@@ -9,14 +9,15 @@ LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=eb723b61539feef013de476e68b5c50a \
file://ip/ip.c;beginline=3;endline=8;md5=689d691d0410a4b64d3899f8d6e31817"
-DEPENDS = "flex-native bison-native iptables elfutils libcap"
+DEPENDS = "flex-native bison-native iptables libcap"
inherit update-alternatives bash-completion pkgconfig
CLEANBROKEN = "1"
-PACKAGECONFIG ??= "tipc"
+PACKAGECONFIG ??= "tipc elf"
PACKAGECONFIG[tipc] = ",,libmnl,"
+PACKAGECONFIG[elf] = ",,elfutils,"
EXTRA_OEMAKE = "CC='${CC}' KERNEL_INCLUDE=${STAGING_INCDIR} DOCDIR=${docdir}/iproute2 SUBDIRS='lib tc ip bridge misc genl \
${@bb.utils.contains('PACKAGECONFIG', 'tipc', 'tipc', '', d)}' SBINDIR='${base_sbindir}' LIBDIR='${libdir}'"
diff --git a/meta/recipes-connectivity/iw/iw_5.0.1.bb b/meta/recipes-connectivity/iw/iw_5.3.bb
index 87b21e668d..f7f13f5a30 100644
--- a/meta/recipes-connectivity/iw/iw_5.0.1.bb
+++ b/meta/recipes-connectivity/iw/iw_5.3.bb
@@ -14,8 +14,8 @@ SRC_URI = "http://www.kernel.org/pub/software/network/iw/${BP}.tar.gz \
file://separate-objdir.patch \
"
-SRC_URI[md5sum] = "a0a17ab1b20132c716bba9a4f9974ba6"
-SRC_URI[sha256sum] = "36fc7592dde7bec934df83cd53ef1f2c08ceec5cd58d07eb8f71cc6e8464013c"
+SRC_URI[md5sum] = "6d4d1c0ee34f3a7bda0e6aafcd7aaf31"
+SRC_URI[sha256sum] = "175abbfce86348c0b70e778c13a94c0bfc9abc7a506d2bd608261583aeedf64a"
inherit pkgconfig
@@ -26,7 +26,6 @@ EXTRA_OEMAKE = "\
'SBINDIR=${sbindir}' \
'MANDIR=${mandir}' \
"
-B = "${WORKDIR}/build"
do_install() {
oe_runmake 'DESTDIR=${D}' install
diff --git a/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch b/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch
deleted file mode 100644
index 01773834c7..0000000000
--- a/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From aafa3512b7b742f5e66a5543e41974cc5e7eebfa Mon Sep 17 00:00:00 2001
-From: maxice8 <thinkabit.ukim@gmail.com>
-Date: Sun, 22 Jul 2018 18:54:17 -0300
-Subject: [PATCH] pcap-usb-linux.c: add missing limits.h for musl systems.
-
-fix compilation on musl libc systems like Void Linux and Alpine.
-
-Upstream-Status: Backport [https://github.com/the-tcpdump-group/libpcap/commit/d557c98a16dc254aaff03762b694fe624e180bea]
-
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
----
- pcap-usb-linux.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/pcap-usb-linux.c b/pcap-usb-linux.c
-index 6f8adf65..b92c05ea 100644
---- a/pcap-usb-linux.c
-+++ b/pcap-usb-linux.c
-@@ -50,6 +50,7 @@
- #include <stdlib.h>
- #include <unistd.h>
- #include <fcntl.h>
-+#include <limits.h>
- #include <string.h>
- #include <dirent.h>
- #include <byteswap.h>
---
-2.17.1
-
diff --git a/meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb b/meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb
index 77bc31af7e..35bb5650b3 100644
--- a/meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb
+++ b/meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb
@@ -5,16 +5,15 @@ security monitoring and network debugging."
HOMEPAGE = "http://www.tcpdump.org/"
BUGTRACKER = "http://sourceforge.net/tracker/?group_id=53067&atid=469577"
SECTION = "libs/network"
-LICENSE = "BSD"
+LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=5eb289217c160e2920d2e35bddc36453 \
file://pcap.h;beginline=1;endline=32;md5=39af3510e011f34b8872f120b1dc31d2"
DEPENDS = "flex-native bison-native"
SRC_URI = "https://www.tcpdump.org/release/${BP}.tar.gz \
- file://0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch \
"
-SRC_URI[md5sum] = "dffd65cb14406ab9841f421732eb0f33"
-SRC_URI[sha256sum] = "2edb88808e5913fdaa8e9c1fcaf272e19b2485338742b5074b9fe44d68f37019"
+SRC_URI[md5sum] = "21af603d9a591c7d96a6457021d84e6c"
+SRC_URI[sha256sum] = "635237637c5b619bcceba91900666b64d56ecb7be63f298f601ec786ce087094"
inherit autotools binconfig-disabled pkgconfig
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Fix-include-order-between-config.h-and-stat.h.patch b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Fix-include-order-between-config.h-and-stat.h.patch
new file mode 100644
index 0000000000..7b0f93535f
--- /dev/null
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Fix-include-order-between-config.h-and-stat.h.patch
@@ -0,0 +1,156 @@
+From 2fbc62e2a13fc22b6ae4910e295a2c10fb790486 Mon Sep 17 00:00:00 2001
+From: Zoltan Karcagi <zkr7432@gmail.com>
+Date: Mon, 12 Aug 2019 13:27:16 -0400
+Subject: [PATCH] Fix include order between config.h and stat.h
+
+At least on Arch Linux ARM, the definition of struct stat in stat.h depends
+on __USE_FILE_OFFSET64. This symbol comes from config.h when defined;
+therefore config.h must always be included before stat.h. Fix all
+occurrences where the order is wrong by moving config.h to the top.
+
+This fixes the client side error "Stale file handle" when mounting from
+a server running Arch Linux ARM.
+
+Signed-off-by: Zoltan Karcagi <zkr7432@gmail.com>
+Signed-off-by: Steve Dickson <steved@redhat.com>
+
+Upstream-Status: Backport
+[http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=commit;h=2fbc62e2a13fc22b6ae4910e295a2c10fb790486]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ support/misc/nfsd_path.c | 5 ++++-
+ support/misc/xstat.c | 5 ++++-
+ support/nfs/conffile.c | 8 +++++++-
+ utils/blkmapd/device-discovery.c | 8 ++++----
+ utils/idmapd/idmapd.c | 8 ++++----
+ 5 files changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/support/misc/nfsd_path.c b/support/misc/nfsd_path.c
+index 84e4802..f078a66 100644
+--- a/support/misc/nfsd_path.c
++++ b/support/misc/nfsd_path.c
+@@ -1,3 +1,7 @@
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
+ #include <errno.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+@@ -5,7 +9,6 @@
+ #include <stdlib.h>
+ #include <unistd.h>
+
+-#include "config.h"
+ #include "conffile.h"
+ #include "xmalloc.h"
+ #include "xlog.h"
+diff --git a/support/misc/xstat.c b/support/misc/xstat.c
+index fa04788..4c997ee 100644
+--- a/support/misc/xstat.c
++++ b/support/misc/xstat.c
+@@ -1,3 +1,7 @@
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
+ #include <errno.h>
+ #include <sys/types.h>
+ #include <fcntl.h>
+@@ -5,7 +9,6 @@
+ #include <sys/sysmacros.h>
+ #include <unistd.h>
+
+-#include "config.h"
+ #include "xstat.h"
+
+ #ifdef HAVE_FSTATAT
+diff --git a/support/nfs/conffile.c b/support/nfs/conffile.c
+index b6400be..6ba8a35 100644
+--- a/support/nfs/conffile.c
++++ b/support/nfs/conffile.c
+@@ -500,7 +500,7 @@ conf_readfile(const char *path)
+
+ if ((stat (path, &sb) == 0) || (errno != ENOENT)) {
+ char *new_conf_addr = NULL;
+- size_t sz = sb.st_size;
++ off_t sz;
+ int fd = open (path, O_RDONLY, 0);
+
+ if (fd == -1) {
+@@ -517,6 +517,11 @@ conf_readfile(const char *path)
+
+ /* only after we have the lock, check the file size ready to read it */
+ sz = lseek(fd, 0, SEEK_END);
++ if (sz < 0) {
++ xlog_warn("conf_readfile: unable to determine file size: %s",
++ strerror(errno));
++ goto fail;
++ }
+ lseek(fd, 0, SEEK_SET);
+
+ new_conf_addr = malloc(sz+1);
+@@ -2162,6 +2167,7 @@ conf_write(const char *filename, const char *section, const char *arg,
+ ret = 0;
+
+ cleanup:
++ flush_outqueue(&inqueue, NULL);
+ flush_outqueue(&outqueue, NULL);
+
+ if (buff)
+diff --git a/utils/blkmapd/device-discovery.c b/utils/blkmapd/device-discovery.c
+index e811703..f5f9b10 100644
+--- a/utils/blkmapd/device-discovery.c
++++ b/utils/blkmapd/device-discovery.c
+@@ -26,6 +26,10 @@
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif /* HAVE_CONFIG_H */
++
+ #include <sys/sysmacros.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+@@ -51,10 +55,6 @@
+ #include <errno.h>
+ #include <libdevmapper.h>
+
+-#ifdef HAVE_CONFIG_H
+-#include "config.h"
+-#endif /* HAVE_CONFIG_H */
+-
+ #include "device-discovery.h"
+ #include "xcommon.h"
+ #include "nfslib.h"
+diff --git a/utils/idmapd/idmapd.c b/utils/idmapd/idmapd.c
+index 62e37b8..267acea 100644
+--- a/utils/idmapd/idmapd.c
++++ b/utils/idmapd/idmapd.c
+@@ -34,6 +34,10 @@
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif /* HAVE_CONFIG_H */
++
+ #include <sys/types.h>
+ #include <sys/time.h>
+ #include <sys/inotify.h>
+@@ -62,10 +66,6 @@
+ #include <libgen.h>
+ #include <nfsidmap.h>
+
+-#ifdef HAVE_CONFIG_H
+-#include "config.h"
+-#endif /* HAVE_CONFIG_H */
+-
+ #include "xlog.h"
+ #include "conffile.h"
+ #include "queue.h"
+--
+2.7.4
+
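The pattern the patch message describes is generic to any autoconf-based project that selects large-file support through config.h: _FILE_OFFSET_BITS / __USE_FILE_OFFSET64 change the layout of struct stat, so config.h has to be seen before <sys/stat.h>. As a rough sketch only (assuming an autoconf-style config.h guarded by HAVE_CONFIG_H, not the nfs-utils sources themselves):

    /* Pull in config.h, which may define _FILE_OFFSET_BITS=64, before any
     * system header whose types depend on it, e.g. <sys/stat.h>. */
    #ifdef HAVE_CONFIG_H
    #include <config.h>
    #endif

    #include <sys/types.h>
    #include <sys/stat.h>   /* struct stat layout now matches the LFS setting */
    #include <stdio.h>

    int main(void)
    {
        struct stat sb;

        if (stat("/etc/hostname", &sb) == 0)
            printf("size=%lld\n", (long long)sb.st_size);
        return 0;
    }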
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils/nfsserver b/meta/recipes-connectivity/nfs-utils/nfs-utils/nfsserver
index d5e9c38a9c..0f5747cc6d 100644
--- a/meta/recipes-connectivity/nfs-utils/nfs-utils/nfsserver
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils/nfsserver
@@ -107,7 +107,7 @@ stop_nfsd(){
#FIXME: need to create the /var/lib/nfs/... directories
case "$1" in
start)
- exportfs -r
+ test -r /etc/exports && exportfs -r
start_nfsd "$NFS_SERVERS"
start_mountd
test -r /etc/exports && exportfs -a;;
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb b/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
index 8b673c8f5f..eb32bccb57 100644
--- a/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=95f3a93a5c3c7888de623b46ea085a84"
# util-linux for libblkid
DEPENDS = "libcap libevent util-linux sqlite3 libtirpc"
-RDEPENDS_${PN} = "${PN}-client bash"
+RDEPENDS_${PN} = "${PN}-client"
RRECOMMENDS_${PN} = "kernel-module-nfsd"
inherit useradd
@@ -32,6 +32,7 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/nfs-utils/${PV}/nfs-utils-${PV}.tar.x
file://clang-format-string.patch \
file://0001-Makefile.am-fix-undefined-function-for-libnsm.a.patch \
file://0001-Don-t-build-tools-with-CC_FOR_BUILD.patch \
+ file://0001-Fix-include-order-between-config.h-and-stat.h.patch \
"
SRC_URI_append_libc-glibc = " file://0001-configure.ac-Do-not-fatalize-Wmissing-prototypes.patch"
SRC_URI_append_libc-musl = " file://nfs-utils-musl-res_querydomain.patch"
diff --git a/meta/recipes-connectivity/ofono/ofono_1.30.bb b/meta/recipes-connectivity/ofono/ofono_1.30.bb
deleted file mode 100644
index c916cb1b22..0000000000
--- a/meta/recipes-connectivity/ofono/ofono_1.30.bb
+++ /dev/null
@@ -1,9 +0,0 @@
-require ofono.inc
-
-SRC_URI = "\
- ${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
- file://ofono \
- file://0001-mbim-add-an-optional-TEMP_FAILURE_RETRY-macro-copy.patch \
-"
-SRC_URI[md5sum] = "2b1ce11a4db1f4b5c8cd96eb7e96ba0c"
-SRC_URI[sha256sum] = "8079735efc5d7f33be9e792e791f2f7ff75c31ce67d477b994673e32319eec5c"
diff --git a/meta/recipes-connectivity/ofono/ofono.inc b/meta/recipes-connectivity/ofono/ofono_1.31.bb
index bdbb0b5bc6..7d0976ad7f 100644
--- a/meta/recipes-connectivity/ofono/ofono.inc
+++ b/meta/recipes-connectivity/ofono/ofono_1.31.bb
@@ -1,28 +1,35 @@
-HOMEPAGE = "http://www.ofono.org"
-SUMMARY = "open source telephony"
+SUMMARY = "open source telephony"
DESCRIPTION = "oFono is a stack for mobile telephony devices on Linux. oFono supports speaking to telephony devices through specific drivers, or with generic AT commands."
-LICENSE = "GPLv2"
+HOMEPAGE = "http://www.ofono.org"
+BUGTRACKER = "https://01.org/jira/browse/OF"
+LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=eb723b61539feef013de476e68b5c50a \
file://src/ofono.h;beginline=1;endline=20;md5=3ce17d5978ef3445def265b98899c2ee"
+DEPENDS = "dbus glib-2.0 udev mobile-broadband-provider-info ell"
-inherit autotools pkgconfig update-rc.d systemd gobject-introspection-data
+SRC_URI = "\
+ ${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
+ file://ofono \
+ file://0001-mbim-add-an-optional-TEMP_FAILURE_RETRY-macro-copy.patch \
+"
+SRC_URI[md5sum] = "1c26340e3c6ed132cc812595081bb3dc"
+SRC_URI[sha256sum] = "a15c5d28096c10eb30e47a68b6dc2e7c4a5a99d7f4cfedf0b69624f33d859e9b"
-DEPENDS = "dbus glib-2.0 udev mobile-broadband-provider-info ell"
+inherit autotools pkgconfig update-rc.d systemd gobject-introspection-data
INITSCRIPT_NAME = "ofono"
INITSCRIPT_PARAMS = "defaults 22"
+SYSTEMD_SERVICE_${PN} = "ofono.service"
PACKAGECONFIG ??= "\
${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', 'bluez', '', d)} \
- "
+"
PACKAGECONFIG[systemd] = "--with-systemdunitdir=${systemd_unitdir}/system/,--with-systemdunitdir="
PACKAGECONFIG[bluez] = "--enable-bluetooth, --disable-bluetooth, bluez5"
EXTRA_OECONF += "--enable-test --enable-external-ell"
-SYSTEMD_SERVICE_${PN} = "ofono.service"
-
do_install_append() {
install -d ${D}${sysconfdir}/init.d/
install -m 0755 ${WORKDIR}/ofono ${D}${sysconfdir}/init.d/ofono
@@ -30,10 +37,14 @@ do_install_append() {
PACKAGES =+ "${PN}-tests"
-RDEPENDS_${PN} += "dbus"
-RRECOMMENDS_${PN} += "kernel-module-tun mobile-broadband-provider-info"
-
FILES_${PN} += "${systemd_unitdir}"
FILES_${PN}-tests = "${libdir}/${BPN}/test"
-RDEPENDS_${PN}-tests = "python3-core python3-dbus"
-RDEPENDS_${PN}-tests += "${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'python3-pygobject', '', d)}"
+
+RDEPENDS_${PN} += "dbus"
+RDEPENDS_${PN}-tests = "\
+ python3-core \
+ python3-dbus \
+ ${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'python3-pygobject', '', d)} \
+"
+
+RRECOMMENDS_${PN} += "kernel-module-tun mobile-broadband-provider-info"
diff --git a/meta/recipes-connectivity/openssh/openssh/0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch b/meta/recipes-connectivity/openssh/openssh/0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch
new file mode 100644
index 0000000000..3265be3485
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch
@@ -0,0 +1,40 @@
+From 2014fad3d28090b59d2f8a0971166c06e5fa6da6 Mon Sep 17 00:00:00 2001
+From: Hongxu Jia <hongxu.jia@windriver.com>
+Date: Fri, 18 Oct 2019 14:56:58 +0800
+Subject: [PATCH] upstream: fix integer overflow in XMSS private key parsing.
+
+Reported by Adam Zabrocki via SecuriTeam's SSH program.
+
+Note that this code is experimental and not compiled by default.
+
+ok markus@
+
+OpenBSD-Commit-ID: cd0361896d15e8a1bac495ac583ff065ffca2be1
+
+Signed-off-by: "djm@openbsd.org" <djm@openbsd.org>
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/a546b17bbaeb12beac4c9aeed56f74a42b18a93a]
+CVE: CVE-2019-16905
+
+Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+---
+ sshkey-xmss.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sshkey-xmss.c b/sshkey-xmss.c
+index aaae702..c57681a 100644
+--- a/sshkey-xmss.c
++++ b/sshkey-xmss.c
+@@ -977,7 +977,8 @@ sshkey_xmss_decrypt_state(const struct sshkey *k, struct sshbuf *encoded,
+ goto out;
+ }
+ /* check that an appropriate amount of auth data is present */
+- if (sshbuf_len(encoded) < encrypted_len + authlen) {
++ if (sshbuf_len(encoded) < authlen ||
++ sshbuf_len(encoded) - authlen < encrypted_len) {
+ r = SSH_ERR_INVALID_FORMAT;
+ goto out;
+ }
+--
+2.7.4
+
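The rewritten length check above follows the usual idiom for avoiding integer overflow: never form the sum encrypted_len + authlen, subtract instead once the subtraction is known not to wrap. A minimal standalone sketch of that idiom (hypothetical names, not the OpenSSH API):

    #include <stddef.h>

    /* Return nonzero if a buffer of buf_len bytes can hold payload_len bytes
     * of ciphertext plus auth_len bytes of authentication data, without ever
     * computing payload_len + auth_len (which could wrap around). */
    static int has_room(size_t buf_len, size_t payload_len, size_t auth_len)
    {
        if (buf_len < auth_len)
            return 0;
        return buf_len - auth_len >= payload_len;
    }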
diff --git a/meta/recipes-connectivity/openssh/openssh_8.0p1.bb b/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
index 01eaecd4ec..2ffbc9a95f 100644
--- a/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
@@ -24,6 +24,7 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://fix-potential-signed-overflow-in-pointer-arithmatic.patch \
file://sshd_check_keys \
file://add-test-support-for-busybox.patch \
+ file://0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch \
"
SRC_URI[md5sum] = "bf050f002fe510e1daecd39044e1122d"
SRC_URI[sha256sum] = "bd943879e69498e8031eb6b7f44d08cdc37d59a7ab689aa0b437320c3481fd68"
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Fix-broken-change-from-b3d113e.patch b/meta/recipes-connectivity/openssl/openssl/0001-Fix-broken-change-from-b3d113e.patch
deleted file mode 100644
index 6b4789fc70..0000000000
--- a/meta/recipes-connectivity/openssl/openssl/0001-Fix-broken-change-from-b3d113e.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 711a161f03ef9ed7cd149a22bf1203700c103e96 Mon Sep 17 00:00:00 2001
-From: Pauli <paul.dale@oracle.com>
-Date: Fri, 29 Mar 2019 09:24:07 +1000
-Subject: [PATCH] Fix broken change from b3d113e.
-
-Reviewed-by: Tim Hudson <tjh@openssl.org>
-(Merged from https://github.com/openssl/openssl/pull/8606)
-
-Running valgrind against code using Openssl v1.1.1c reports a large number of
-uninitialized memory errors. This fix from upstream solves this problem.
-
-Upstream-Status: Backport [https://github.com/openssl/openssl/commit/711a161f03ef9ed7cd149a22bf1203700c103e96]
-Signed-off-by: Laurent Bonnans <laurent.bonnans@here.com>
----
- crypto/rand/rand_lib.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/crypto/rand/rand_lib.c b/crypto/rand/rand_lib.c
-index 23abbde156..a298b7515b 100644
---- a/crypto/rand/rand_lib.c
-+++ b/crypto/rand/rand_lib.c
-@@ -235,8 +235,9 @@ size_t rand_drbg_get_nonce(RAND_DRBG *drbg,
- struct {
- void * instance;
- int count;
-- } data = { NULL, 0 };
-+ } data;
-
-+ memset(&data, 0, sizeof(data));
- pool = rand_pool_new(0, min_len, max_len);
- if (pool == NULL)
- return 0;
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Fix-build-error-for-aarch64-big-endian.patch b/meta/recipes-connectivity/openssl/openssl/0001-Fix-build-error-for-aarch64-big-endian.patch
deleted file mode 100644
index 9a90a68cfd..0000000000
--- a/meta/recipes-connectivity/openssl/openssl/0001-Fix-build-error-for-aarch64-big-endian.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 1f8c0f2feea5cdcae0bcd9dfc78198d9e2c4cf09 Mon Sep 17 00:00:00 2001
-From: Lei Maohui <leimaohui@cn.fujitsu.com>
-Date: Thu, 13 Jun 2019 12:17:30 +0900
-Subject: [PATCH] Fix build error for aarch64 big endian.
-
-Modified rev to rev64, because rev only takes integer registers.
-https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90827
-Otherwise, the following error will occur.
-
-Error: operand 1 must be an integer register -- `rev v31.16b,v31.16b'
-
-Upstream-Status: Submitted [https://github.com/openssl/openssl/pull/9151]
-
-Signed-off-by: Lei Maohui <leimaohui@cn.fujitsu.com>
----
- crypto/sha/asm/keccak1600-armv8.pl | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/crypto/sha/asm/keccak1600-armv8.pl b/crypto/sha/asm/keccak1600-armv8.pl
-index dc72f18..6620690 100755
---- a/crypto/sha/asm/keccak1600-armv8.pl
-+++ b/crypto/sha/asm/keccak1600-armv8.pl
-@@ -731,7 +731,7 @@ $code.=<<___;
- blo .Lprocess_block_ce
- ldr d31,[$inp],#8 // *inp++
- #ifdef __AARCH64EB__
-- rev v31.16b,v31.16b
-+ rev64 v31.16b,v31.16b
- #endif
- eor $A[$j/5][$j%5],$A[$j/5][$j%5],v31.16b
- beq .Lprocess_block_ce
-@@ -740,7 +740,7 @@ ___
- $code.=<<___;
- ldr d31,[$inp],#8 // *inp++
- #ifdef __AARCH64EB__
-- rev v31.16b,v31.16b
-+ rev64 v31.16b,v31.16b
- #endif
- eor $A[4][4],$A[4][4],v31.16b
-
---
-2.7.4
-
diff --git a/meta/recipes-connectivity/openssl/openssl_1.1.1c.bb b/meta/recipes-connectivity/openssl/openssl_1.1.1d.bb
index 75159ac725..8819e19ec4 100644
--- a/meta/recipes-connectivity/openssl/openssl_1.1.1c.bb
+++ b/meta/recipes-connectivity/openssl/openssl_1.1.1d.bb
@@ -16,16 +16,14 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://0001-skip-test_symbol_presence.patch \
file://0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch \
file://afalg.patch \
- file://0001-Fix-build-error-for-aarch64-big-endian.patch \
- file://0001-Fix-broken-change-from-b3d113e.patch \
"
SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[md5sum] = "15e21da6efe8aa0e0768ffd8cd37a5f6"
-SRC_URI[sha256sum] = "f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90"
+SRC_URI[md5sum] = "3be209000dbc7e1b95bcdf47980a3baa"
+SRC_URI[sha256sum] = "1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2"
inherit lib_package multilib_header multilib_script ptest
MULTILIB_SCRIPTS = "${PN}-bin:${bindir}/c_rehash"
@@ -45,10 +43,10 @@ do_configure[cleandirs] = "${B}"
EXTRA_OECONF_append_libc-musl = " no-async"
EXTRA_OECONF_append_libc-musl_powerpc64 = " no-asm"
-# This prevents openssl from using getrandom() which is not available on older glibc versions
+# adding devrandom prevents openssl from using getrandom() which is not available on older glibc versions
# (native versions can be built with newer glibc, but then relocated onto a system with older glibc)
-EXTRA_OECONF_class-native = "--with-rand-seed=devrandom"
-EXTRA_OECONF_class-nativesdk = "--with-rand-seed=devrandom"
+EXTRA_OECONF_class-native = "--with-rand-seed=os,devrandom"
+EXTRA_OECONF_class-nativesdk = "--with-rand-seed=os,devrandom"
# Relying on hardcoded built-in paths causes openssl-native to not be relocateable from sstate.
CFLAGS_append_class-native = " -DOPENSSLDIR=/not/builtin -DENGINESDIR=/not/builtin"
@@ -150,7 +148,7 @@ do_install_append_class-native () {
OPENSSL_CONF=${libdir}/ssl-1.1/openssl.cnf \
SSL_CERT_DIR=${libdir}/ssl-1.1/certs \
SSL_CERT_FILE=${libdir}/ssl-1.1/cert.pem \
- OPENSSL_ENGINES=${libdir}/ssl-1.1/engines
+ OPENSSL_ENGINES=${libdir}/engines-1.1
}
do_install_append_class-nativesdk () {
diff --git a/meta/recipes-connectivity/socat/socat_1.7.3.3.bb b/meta/recipes-connectivity/socat/socat_1.7.3.3.bb
index 067f7c6444..1dbbe5cd55 100644
--- a/meta/recipes-connectivity/socat/socat_1.7.3.3.bb
+++ b/meta/recipes-connectivity/socat/socat_1.7.3.3.bb
@@ -5,7 +5,7 @@ HOMEPAGE = "http://www.dest-unreach.org/socat/"
SECTION = "console/network"
-DEPENDS = "openssl readline"
+DEPENDS = "openssl"
LICENSE = "GPL-2.0-with-OpenSSL-exception"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
@@ -39,9 +39,10 @@ TERMBITS_SHIFTS_powerpc64 = "sc_cv_sys_crdly_shift=12 \
sc_cv_sys_tabdly_shift=10 \
sc_cv_sys_csize_shift=8"
-PACKAGECONFIG_class-target ??= "tcp-wrappers"
-PACKAGECONFIG ??= ""
+PACKAGECONFIG_class-target ??= "tcp-wrappers readline"
+PACKAGECONFIG ??= "readline"
PACKAGECONFIG[tcp-wrappers] = "--enable-libwrap,--disable-libwrap,tcp-wrappers"
+PACKAGECONFIG[readline] = "--enable-readline,--disable-readline,readline"
do_install_prepend () {
mkdir -p ${D}${bindir}
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch
new file mode 100644
index 0000000000..7b0713cf6d
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch
@@ -0,0 +1,82 @@
+hostapd before 2.10 and wpa_supplicant before 2.10 allow an incorrect indication
+of disconnection in certain situations because source address validation is
+mishandled. This is a denial of service that should have been prevented by PMF
+(aka management frame protection). The attacker must send a crafted 802.11 frame
+from a location that is within the 802.11 communications range.
+
+CVE: CVE-2019-16275
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 8c07fa9eda13e835f3f968b2e1c9a8be3a851ff9 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Thu, 29 Aug 2019 11:52:04 +0300
+Subject: [PATCH] AP: Silently ignore management frame from unexpected source
+ address
+
+Do not process any received Management frames with unexpected/invalid SA
+so that we do not add any state for unexpected STA addresses or end up
+sending out frames to unexpected destination. This prevents unexpected
+sequences where an unprotected frame might end up causing the AP to send
+out a response to another device and that other device processing the
+unexpected response.
+
+In particular, this prevents some potential denial of service cases
+where the unexpected response frame from the AP might result in a
+connected station dropping its association.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+---
+ src/ap/drv_callbacks.c | 13 +++++++++++++
+ src/ap/ieee802_11.c | 12 ++++++++++++
+ 2 files changed, 25 insertions(+)
+
+diff --git a/src/ap/drv_callbacks.c b/src/ap/drv_callbacks.c
+index 31587685fe3b..34ca379edc3d 100644
+--- a/src/ap/drv_callbacks.c
++++ b/src/ap/drv_callbacks.c
+@@ -131,6 +131,19 @@ int hostapd_notif_assoc(struct hostapd_data *hapd, const u8 *addr,
+ "hostapd_notif_assoc: Skip event with no address");
+ return -1;
+ }
++
++ if (is_multicast_ether_addr(addr) ||
++ is_zero_ether_addr(addr) ||
++ os_memcmp(addr, hapd->own_addr, ETH_ALEN) == 0) {
++ /* Do not process any frames with unexpected/invalid SA so that
++ * we do not add any state for unexpected STA addresses or end
++ * up sending out frames to unexpected destination. */
++ wpa_printf(MSG_DEBUG, "%s: Invalid SA=" MACSTR
++ " in received indication - ignore this indication silently",
++ __func__, MAC2STR(addr));
++ return 0;
++ }
++
+ random_add_randomness(addr, ETH_ALEN);
+
+ hostapd_logger(hapd, addr, HOSTAPD_MODULE_IEEE80211,
+diff --git a/src/ap/ieee802_11.c b/src/ap/ieee802_11.c
+index c85a28db44b7..e7065372e158 100644
+--- a/src/ap/ieee802_11.c
++++ b/src/ap/ieee802_11.c
+@@ -4626,6 +4626,18 @@ int ieee802_11_mgmt(struct hostapd_data *hapd, const u8 *buf, size_t len,
+ fc = le_to_host16(mgmt->frame_control);
+ stype = WLAN_FC_GET_STYPE(fc);
+
++ if (is_multicast_ether_addr(mgmt->sa) ||
++ is_zero_ether_addr(mgmt->sa) ||
++ os_memcmp(mgmt->sa, hapd->own_addr, ETH_ALEN) == 0) {
++ /* Do not process any frames with unexpected/invalid SA so that
++ * we do not add any state for unexpected STA addresses or end
++ * up sending out frames to unexpected destination. */
++ wpa_printf(MSG_DEBUG, "MGMT: Invalid SA=" MACSTR
++ " in received frame - ignore this frame silently",
++ MAC2STR(mgmt->sa));
++ return 0;
++ }
++
+ if (stype == WLAN_FC_STYPE_BEACON) {
+ handle_beacon(hapd, mgmt, len, fi);
+ return 1;
+--
+2.20.1
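The added checks amount to a "plausible unicast peer" test on the source address. Re-implemented outside hostapd purely for illustration (the is_*_ether_addr helpers and MACSTR logging are hostapd's own; only standard C is used here):

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Reject source addresses that can never belong to a real peer station:
     * group (multicast/broadcast) addresses, the all-zero address, and the
     * AP's own MAC address. */
    static int valid_peer_sa(const uint8_t sa[ETH_ALEN],
                             const uint8_t own_addr[ETH_ALEN])
    {
        static const uint8_t zero[ETH_ALEN] = { 0 };

        if (sa[0] & 0x01)                        /* group bit: multicast/broadcast */
            return 0;
        if (memcmp(sa, zero, ETH_ALEN) == 0)     /* all-zero address */
            return 0;
        if (memcmp(sa, own_addr, ETH_ALEN) == 0) /* frame claiming to come from us */
            return 0;
        return 1;
    }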
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
index c16978cfe8..3e92427bb0 100644
--- a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
@@ -2,7 +2,7 @@ SUMMARY = "Client for Wi-Fi Protected Access (WPA)"
HOMEPAGE = "http://w1.fi/wpa_supplicant/"
BUGTRACKER = "http://w1.fi/security/"
SECTION = "network"
-LICENSE = "BSD"
+LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=279b4f5abb9c153c285221855ddb78cc \
file://README;beginline=1;endline=56;md5=e7d3dbb01f75f0b9799e192731d1e1ff \
file://wpa_supplicant/wpa_supplicant.c;beginline=1;endline=12;md5=0a8b56d3543498b742b9c0e94cc2d18b"
@@ -25,6 +25,7 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
file://wpa_supplicant.conf-sane \
file://99_wpa_supplicant \
file://0001-replace-systemd-install-Alias-with-WantedBy.patch \
+ file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \
"
SRC_URI[md5sum] = "2d2958c782576dc9901092fbfecb4190"
SRC_URI[sha256sum] = "fcbdee7b4a64bea8177973299c8c824419c413ec2e3a95db63dd6a5dc3541f17"
diff --git a/meta/recipes-core/base-files/base-files_3.0.14.bb b/meta/recipes-core/base-files/base-files_3.0.14.bb
index 2b1f217ddb..94299431f6 100644
--- a/meta/recipes-core/base-files/base-files_3.0.14.bb
+++ b/meta/recipes-core/base-files/base-files_3.0.14.bb
@@ -20,7 +20,6 @@ SRC_URI = "file://rotation \
file://fstab \
file://issue.net \
file://issue \
- file://usbd \
file://share/dot.bashrc \
file://share/dot.profile \
file://licenses/GPL-2 \
@@ -122,7 +121,6 @@ do_install () {
fi
install -m 0644 ${WORKDIR}/fstab ${D}${sysconfdir}/fstab
- install -m 0644 ${WORKDIR}/usbd ${D}${sysconfdir}/default/usbd
install -m 0644 ${WORKDIR}/profile ${D}${sysconfdir}/profile
sed -i 's#ROOTHOME#${ROOT_HOME}#' ${D}${sysconfdir}/profile
sed -i 's#@BINDIR@#${bindir}#g' ${D}${sysconfdir}/profile
diff --git a/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb b/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb
index c347daf501..61fb8cbad1 100644
--- a/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb
+++ b/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb
@@ -13,15 +13,16 @@ do_compile() {
}
do_install() {
- install -d ${D}${sysconfdir}
- install -D -m 0644 ${WORKDIR}/inittab ${D}${sysconfdir}/inittab
- tmp="${SERIAL_CONSOLES}"
- for i in $tmp
- do
- j=`echo ${i} | sed s/\;/\ /g`
- id=`echo ${i} | sed -e 's/^.*;//' -e 's/;.*//'`
- echo "$id::respawn:${base_sbindir}/getty ${j}" >> ${D}${sysconfdir}/inittab
- done
+ install -d ${D}${sysconfdir}
+ install -D -m 0644 ${WORKDIR}/inittab ${D}${sysconfdir}/inittab
+ tmp="${SERIAL_CONSOLES}"
+ [ -n "$tmp" ] && echo >> ${D}${sysconfdir}/inittab
+ for i in $tmp
+ do
+ j=`echo ${i} | sed s/\;/\ /g`
+ id=`echo ${i} | sed -e 's/^.*;//' -e 's/;.*//'`
+ echo "$id::respawn:${base_sbindir}/getty ${j}" >> ${D}${sysconfdir}/inittab
+ done
}
# SERIAL_CONSOLES is generally defined by the MACHINE .conf.
diff --git a/meta/recipes-core/busybox/busybox.inc b/meta/recipes-core/busybox/busybox.inc
index 49165d7f51..d08fa8d3d9 100644
--- a/meta/recipes-core/busybox/busybox.inc
+++ b/meta/recipes-core/busybox/busybox.inc
@@ -128,7 +128,9 @@ do_prepare_config () {
${S}/.config.oe-tmp > ${S}/.config
fi
sed -i 's/CONFIG_IFUPDOWN_UDHCPC_CMD_OPTIONS="-R -n"/CONFIG_IFUPDOWN_UDHCPC_CMD_OPTIONS="-R -b"/' ${S}/.config
- sed -i 's|${DEBUG_PREFIX_MAP}||g' ${S}/.config
+ if [ -n "${DEBUG_PREFIX_MAP}" ]; then
+ sed -i 's|${DEBUG_PREFIX_MAP}||g' ${S}/.config
+ fi
}
# returns all the elements from the src uri that are .cfg files
@@ -142,6 +144,7 @@ def find_cfgs(d):
return sources_list
do_configure () {
+ set -x
do_prepare_config
merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
cml1_do_configure
@@ -316,8 +319,8 @@ do_install () {
fi
fi
if grep -q "CONFIG_INIT=y" ${B}/.config; then
- install -D -m 0777 ${WORKDIR}/rcS ${D}${sysconfdir}/init.d/rcS
- install -D -m 0777 ${WORKDIR}/rcK ${D}${sysconfdir}/init.d/rcK
+ install -D -m 0755 ${WORKDIR}/rcS ${D}${sysconfdir}/init.d/rcS
+ install -D -m 0755 ${WORKDIR}/rcK ${D}${sysconfdir}/init.d/rcK
fi
if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
diff --git a/meta/recipes-core/busybox/busybox_1.31.0.bb b/meta/recipes-core/busybox/busybox_1.31.0.bb
index c1da372629..34b1f2cc91 100644
--- a/meta/recipes-core/busybox/busybox_1.31.0.bb
+++ b/meta/recipes-core/busybox/busybox_1.31.0.bb
@@ -9,7 +9,6 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://default.script \
file://simple.script \
file://hwclock.sh \
- file://mount.busybox \
file://syslog \
file://syslog-startup.conf \
file://syslog.conf \
@@ -17,7 +16,6 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://mdev \
file://mdev.conf \
file://mdev-mount.sh \
- file://umount.busybox \
file://defconfig \
file://busybox-syslog.service.in \
file://busybox-klogd.service.in \
@@ -38,7 +36,6 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
${@["", "file://mdev.cfg"][(d.getVar('VIRTUAL-RUNTIME_dev_manager') == 'busybox-mdev')]} \
file://syslog.cfg \
file://unicode.cfg \
- file://inittab \
file://rcS \
file://rcK \
file://makefile-libbb-race.patch \
diff --git a/meta/recipes-core/busybox/files/mount.busybox b/meta/recipes-core/busybox/files/mount.busybox
deleted file mode 100755
index fef945b7b2..0000000000
--- a/meta/recipes-core/busybox/files/mount.busybox
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-exec /bin/busybox mount $@
diff --git a/meta/recipes-core/busybox/files/umount.busybox b/meta/recipes-core/busybox/files/umount.busybox
deleted file mode 100755
index f3731626e6..0000000000
--- a/meta/recipes-core/busybox/files/umount.busybox
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-exec /bin/busybox umount $@
diff --git a/meta/recipes-core/coreutils/coreutils_8.31.bb b/meta/recipes-core/coreutils/coreutils_8.31.bb
index 4a74f619af..57b2c1bdba 100644
--- a/meta/recipes-core/coreutils/coreutils_8.31.bb
+++ b/meta/recipes-core/coreutils/coreutils_8.31.bb
@@ -49,7 +49,7 @@ bindir_progs = "arch basename chcon cksum comm csplit cut dir dircolors dirname
env expand expr factor fmt fold groups head hostid id install \
join link logname md5sum mkfifo nl nohup nproc od paste pathchk \
pinky pr printf ptx readlink realpath runcon seq sha1sum sha224sum sha256sum \
- sha384sum sha512sum shred shuf sort split stdbuf sum tac tail tee test timeout \
+ sha384sum sha512sum shred shuf sort split sum tac tail tee test timeout \
tr truncate tsort tty unexpand uniq unlink uptime users vdir wc who whoami yes"
# hostname gets a special treatment and is not included in this
@@ -58,6 +58,10 @@ base_bindir_progs = "cat chgrp chmod chown cp date dd echo false hostname kill l
sbindir_progs= "chroot"
+PACKAGE_BEFORE_PN_class-target += "coreutils-stdbuf"
+FILES_coreutils-stdbuf = "${bindir}/stdbuf ${libdir}/coreutils/libstdbuf.so"
+RDEPENDS_coreutils_class-target += "coreutils-stdbuf"
+
# Let aclocal use the relative path for the m4 file rather than the
# absolute since coreutils has a lot of m4 files, otherwise there might
# be an "Argument list too long" error when it is built in a long/deep
diff --git a/meta/recipes-core/dbus/dbus/dbus-1.init b/meta/recipes-core/dbus/dbus/dbus-1.init
index 42c86297c3..90e167e572 100644
--- a/meta/recipes-core/dbus/dbus/dbus-1.init
+++ b/meta/recipes-core/dbus/dbus/dbus-1.init
@@ -21,8 +21,8 @@
DAEMON=@bindir@/dbus-daemon
NAME=dbus
-DAEMONUSER=messagebus # must match /etc/dbus-1/system.conf
-PIDFILE=/var/run/messagebus.pid # must match /etc/dbus-1/system.conf
+DAEMONUSER=messagebus # must match /usr/share/dbus-1/system.conf
+PIDFILE=/var/run/dbus/pid # must match /usr/share/dbus-1/system.conf
UUIDDIR=/var/lib/dbus
DESC="system message bus"
EVENTDIR=/etc/dbus-1/event.d
diff --git a/meta/recipes-core/dbus/dbus_1.12.16.bb b/meta/recipes-core/dbus/dbus_1.12.16.bb
index 05716608b0..f4fec2365c 100644
--- a/meta/recipes-core/dbus/dbus_1.12.16.bb
+++ b/meta/recipes-core/dbus/dbus_1.12.16.bb
@@ -92,11 +92,13 @@ pkg_postinst_dbus() {
fi
}
+
EXTRA_OECONF = "--disable-tests \
--disable-xml-docs \
--disable-doxygen-docs \
--disable-libaudit \
--enable-largefile \
+ --with-system-socket=/run/dbus/system_bus_socket \
"
EXTRA_OECONF_append_class-target = " SYSTEMCTL=${base_bindir}/systemctl"
diff --git a/meta/recipes-core/ell/ell_0.22.bb b/meta/recipes-core/ell/ell_0.26.bb
index b3942fc30e..f1f252ce4f 100644
--- a/meta/recipes-core/ell/ell_0.22.bb
+++ b/meta/recipes-core/ell/ell_0.26.bb
@@ -14,8 +14,8 @@ DEPENDS = "dbus"
inherit autotools pkgconfig
SRC_URI = "https://mirrors.edge.kernel.org/pub/linux/libs/${BPN}/${BPN}-${PV}.tar.xz"
-SRC_URI[md5sum] = "a4e7d74404f11e71775b89f53a8f1c33"
-SRC_URI[sha256sum] = "3c1d6d997e17dfcbe4ebcd1331d9a7be5c64f2f0a0813bc223790e570d8da2e3"
+SRC_URI[md5sum] = "4660e25541071e933a2bb02ef2f94e7d"
+SRC_URI[sha256sum] = "7855b4b8f271ba6ee67d87d0965b975a9a8dbeaa616665ca2248afa3b5fcbc77"
do_configure_prepend () {
mkdir -p ${S}/build-aux
diff --git a/meta/recipes-core/expat/expat_2.2.7.bb b/meta/recipes-core/expat/expat_2.2.9.bb
index f213bc3c3e..8f3db41352 100644
--- a/meta/recipes-core/expat/expat_2.2.7.bb
+++ b/meta/recipes-core/expat/expat_2.2.9.bb
@@ -10,8 +10,8 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/expat/expat-${PV}.tar.bz2 \
file://libtool-tag.patch \
"
-SRC_URI[md5sum] = "72f36b87cdb478aba1e78473393766aa"
-SRC_URI[sha256sum] = "cbc9102f4a31a8dafd42d642e9a3aa31e79a0aedaa1f6efd2795ebc83174ec18"
+SRC_URI[md5sum] = "875a2c2ff3e8eb9e5a5cd62db2033ab5"
+SRC_URI[sha256sum] = "f1063084dc4302a427dabcca499c8312b3a32a29b7d2506653ecc8f950a9a237"
inherit autotools lib_package
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-Do-not-write-bindir-into-pkg-config-files.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-Do-not-write-bindir-into-pkg-config-files.patch
index ede29c90ba..edac4c9f75 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0/0001-Do-not-write-bindir-into-pkg-config-files.patch
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-Do-not-write-bindir-into-pkg-config-files.patch
@@ -1,4 +1,4 @@
-From 474e59abec88de0c455836c1f53152bf2aa26c34 Mon Sep 17 00:00:00 2001
+From 60b36289ac314ad972cf81c1acd19f6f2e58ff25 Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Fri, 15 Feb 2019 11:17:27 +0100
Subject: [PATCH] Do not write $bindir into pkg-config files
@@ -9,33 +9,44 @@ rather than use target paths).
Upstream-Status: Inappropriate [upstream wants the paths in .pc files]
Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
---
- gio/meson.build | 6 +++---
- glib/meson.build | 6 +++---
- 2 files changed, 6 insertions(+), 6 deletions(-)
+ gio/meson.build | 16 ++++++++--------
+ glib/meson.build | 6 +++---
+ 2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/gio/meson.build b/gio/meson.build
-index 85d8b14..657720a 100644
+index 71e88c4..8ce3987 100644
--- a/gio/meson.build
+++ b/gio/meson.build
-@@ -813,9 +813,9 @@ pkg.generate(libraries : libgio,
+@@ -831,14 +831,14 @@ pkg.generate(libgio,
'schemasdir=' + join_paths('${datadir}', schemas_subdir),
'bindir=' + join_paths('${prefix}', get_option('bindir')),
'giomoduledir=' + giomodulesdir,
+- 'gio=' + join_paths('${bindir}', 'gio'),
+- 'gio_querymodules=' + join_paths('${bindir}', 'gio-querymodules'),
- 'glib_compile_schemas=' + join_paths('${bindir}', 'glib-compile-schemas'),
- 'glib_compile_resources=' + join_paths('${bindir}', 'glib-compile-resources'),
-- 'gdbus_codegen=' + join_paths('${bindir}', 'gdbus-codegen')],
+- 'gdbus=' + join_paths('${bindir}', 'gdbus'),
+- 'gdbus_codegen=' + join_paths('${bindir}', 'gdbus-codegen'),
+- 'gresource=' + join_paths('${bindir}', 'gresource'),
+- 'gsettings=' + join_paths('${bindir}', 'gsettings')],
++ 'gio=gio',
++ 'gio_querymodules=gio-querymodules',
+ 'glib_compile_schemas=glib-compile-schemas',
+ 'glib_compile_resources=glib-compile-resources',
-+ 'gdbus_codegen=gdbus-codegen'],
++ 'gdbus=gdbus',
++ 'gdbus_codegen=gdbus-codegen',
++ 'gresource=gresource',
++ 'gsettings=gsettings'],
version : glib_version,
install_dir : glib_pkgconfigreldir,
filebase : 'gio-2.0',
diff --git a/glib/meson.build b/glib/meson.build
-index c05c694..434e8b1 100644
+index 91a48f1..978fb73 100644
--- a/glib/meson.build
+++ b/glib/meson.build
-@@ -261,9 +261,9 @@ pkg.generate(libraries : [libglib, libintl],
+@@ -375,9 +375,9 @@ pkg.generate(libglib,
subdirs : ['glib-2.0'],
extra_cflags : ['-I${libdir}/glib-2.0/include'] + win32_cflags,
variables : ['bindir=' + join_paths('${prefix}', get_option('bindir')),
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-Fix-DATADIRNAME-on-uclibc-Linux.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-Fix-DATADIRNAME-on-uclibc-Linux.patch
new file mode 100644
index 0000000000..d8cf269bb8
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-Fix-DATADIRNAME-on-uclibc-Linux.patch
@@ -0,0 +1,34 @@
+From 15f807481de53942525b48952c5b6bbb9fb66542 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 15 Mar 2014 22:42:29 -0700
+Subject: [PATCH] Fix DATADIRNAME on uclibc/Linux
+
+translation files are always installed under PREFIX/share/locale in uclibc
+based systems therefore lets set DATADIRNAME to "share".
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Upstream-Status: Pending
+
+%% original patch: uclibc_musl_translation.patch
+---
+ m4macros/glib-gettext.m4 | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/m4macros/glib-gettext.m4 b/m4macros/glib-gettext.m4
+index df6fbf0..47db864 100644
+--- a/m4macros/glib-gettext.m4
++++ b/m4macros/glib-gettext.m4
+@@ -293,6 +293,10 @@ msgstr ""
+ CATOBJEXT=.mo
+ DATADIRNAME=share
+ ;;
++ *-*-musl* | *-*-linux-uclibc*)
++ CATOBJEXT=.gmo
++ DATADIRNAME=share
++ ;;
+ *)
+ CATOBJEXT=.mo
+ DATADIRNAME=lib
+--
+2.17.1
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-Set-host_machine-correctly-when-building-with-mingw3.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-Set-host_machine-correctly-when-building-with-mingw3.patch
index d22a646c5d..b02169e09b 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0/0001-Set-host_machine-correctly-when-building-with-mingw3.patch
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-Set-host_machine-correctly-when-building-with-mingw3.patch
@@ -1,4 +1,4 @@
-From f5a4b4c0579734923c9caf70944322efff57318b Mon Sep 17 00:00:00 2001
+From cfff734af6bff6a30a649f784ecf698658c01884 Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Wed, 13 Feb 2019 15:32:05 +0100
Subject: [PATCH] Set host_machine correctly when building with mingw32
@@ -14,11 +14,11 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
4 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/gio/tests/meson.build b/gio/tests/meson.build
-index 028b196..217ccb1 100644
+index 95aafc1..9025eb2 100644
--- a/gio/tests/meson.build
+++ b/gio/tests/meson.build
-@@ -12,7 +12,7 @@ test_c_args = [
- '-DGLIB_COMPILE_SCHEMAS="@0@"'.format(glib_compile_schemas.full_path()),
+@@ -13,7 +13,7 @@ test_c_args = [
+ '-UG_DISABLE_ASSERT',
]
-if host_machine.system() == 'windows'
@@ -26,7 +26,7 @@ index 028b196..217ccb1 100644
common_gio_tests_deps += [iphlpapi_dep, winsock2, cc.find_library ('secur32')]
endif
-@@ -119,7 +119,7 @@ if dbus1_dep.found()
+@@ -120,7 +120,7 @@ if dbus1_dep.found()
endif
# Test programs buildable on UNIX only
@@ -35,7 +35,7 @@ index 028b196..217ccb1 100644
gio_tests += {
'file' : {},
'gdbus-peer' : {
-@@ -327,7 +327,7 @@ if host_machine.system() != 'windows'
+@@ -332,7 +332,7 @@ if host_machine.system() != 'windows'
endif # unix
# Test programs buildable on Windows only
@@ -44,7 +44,7 @@ index 028b196..217ccb1 100644
gio_tests += {'win32-streams' : {}}
endif
-@@ -392,7 +392,7 @@ if cc.get_id() != 'msvc'
+@@ -397,7 +397,7 @@ if cc.get_id() != 'msvc' and cc.get_id() != 'clang-cl'
}
endif
@@ -54,7 +54,7 @@ index 028b196..217ccb1 100644
'gdbus-example-unix-fd-client' : {
'install' : false,
diff --git a/glib/tests/meson.build b/glib/tests/meson.build
-index d54fc41..a4761fe 100644
+index c47133f..cad975f 100644
--- a/glib/tests/meson.build
+++ b/glib/tests/meson.build
@@ -132,7 +132,7 @@ if glib_conf.has('HAVE_EVENTFD')
@@ -67,10 +67,10 @@ index d54fc41..a4761fe 100644
glib_tests += {
'gpoll' : {
diff --git a/meson.build b/meson.build
-index a745024..e87eae5 100644
+index 717d1bc..2a3beb8 100644
--- a/meson.build
+++ b/meson.build
-@@ -31,6 +31,9 @@ else
+@@ -32,6 +32,9 @@ else
endif
host_system = host_machine.system()
@@ -81,7 +81,7 @@ index a745024..e87eae5 100644
glib_version = meson.project_version()
glib_api_version = '2.0'
diff --git a/tests/meson.build b/tests/meson.build
-index 11075dd..cd6067b 100644
+index ce30442..5710f2c 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -66,7 +66,7 @@ test_extra_programs = {
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-Run-atomics-test-on-clang-as-well.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-Run-atomics-test-on-clang-as-well.patch
new file mode 100644
index 0000000000..833ad2540e
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-Run-atomics-test-on-clang-as-well.patch
@@ -0,0 +1,31 @@
+From cce617bec254e327ed7bcad60f58208024c6dc42 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 12 Oct 2019 17:46:26 -0700
+Subject: [PATCH] meson: Run atomics test on clang as well
+
+Fixes
+./glib-2.62.1/glib/gatomic.c:675:2: error: G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.
+^
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ meson.build | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/meson.build b/meson.build
+index 9731de4..c678931 100644
+--- a/meson.build
++++ b/meson.build
+@@ -1634,7 +1634,7 @@ atomicdefine = '''
+ # We know that we can always use real ("lock free") atomic operations with MSVC
+ if cc.get_id() == 'msvc' or cc.get_id() == 'clang-cl' or cc.links(atomictest, name : 'atomic ops')
+ have_atomic_lock_free = true
+- if cc.get_id() == 'gcc' and not cc.compiles(atomicdefine, name : 'atomic ops define')
++ if (cc.get_id() == 'gcc' or cc.get_id() == 'clang') and not cc.compiles(atomicdefine, name : 'atomic ops define')
+ # Old gcc release may provide
+ # __sync_bool_compare_and_swap but doesn't define
+ # __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+--
+2.23.0
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch
deleted file mode 100644
index d1ed028759..0000000000
--- a/meta/recipes-core/glib-2.0/glib-2.0/0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-Upstream-Status: Backport [fc88e56bfc2b09a8fb2b350e76f6425ab0a056d7]
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From 141acf6a2f3b21d63c9cfe620b8e20a506e78493 Mon Sep 17 00:00:00 2001
-From: Ross Burton <ross.burton@intel.com>
-Date: Wed, 13 Mar 2019 16:22:09 +0000
-Subject: [PATCH] meson: do a build-time check for strlcpy before attempting
- runtime check
-
-In cross-compilation environments the runtime check isn't possible so it is up
-to the builder to seed the cross file, but we can definitely state that strlcpy
-doesn't exist with a build test.
----
- meson.build | 30 ++++++++++++++++--------------
- 1 file changed, 16 insertions(+), 14 deletions(-)
-
-diff --git a/meson.build b/meson.build
-index 15039e448..414f2d9b1 100644
---- a/meson.build
-+++ b/meson.build
-@@ -1860,22 +1860,24 @@ endif
-
- # Test if we have strlcpy/strlcat with a compatible implementation:
- # https://bugzilla.gnome.org/show_bug.cgi?id=53933
--if cc_can_run
-- rres = cc.run('''#include <stdlib.h>
-- #include <string.h>
-- int main() {
-- char p[10];
-- (void) strlcpy (p, "hi", 10);
-- if (strlcat (p, "bye", 0) != 3)
-- return 1;
-- return 0;
-- }''',
-- name : 'OpenBSD strlcpy/strlcat')
-- if rres.compiled() and rres.returncode() == 0
-+if cc.has_function('strlcpy')
-+ if cc_can_run
-+ rres = cc.run('''#include <stdlib.h>
-+ #include <string.h>
-+ int main() {
-+ char p[10];
-+ (void) strlcpy (p, "hi", 10);
-+ if (strlcat (p, "bye", 0) != 3)
-+ return 1;
-+ return 0;
-+ }''',
-+ name : 'OpenBSD strlcpy/strlcat')
-+ if rres.compiled() and rres.returncode() == 0
-+ glib_conf.set('HAVE_STRLCPY', 1)
-+ endif
-+ elif meson.get_cross_property('have_strlcpy', false)
- glib_conf.set('HAVE_STRLCPY', 1)
- endif
--elif meson.get_cross_property('have_strlcpy', false)
-- glib_conf.set('HAVE_STRLCPY', 1)
- endif
-
- python = import('python').find_installation('python3')
---
-2.11.0
-
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch
deleted file mode 100644
index 5a1a589890..0000000000
--- a/meta/recipes-core/glib-2.0/glib-2.0/0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 635fe26af51f20194c8b208e7d01303be1086d68 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Tue, 19 Feb 2019 10:31:11 +0100
-Subject: [PATCH] meson.build: do not hardcode 'linux' as the host system
-
-OE build system can set this to other values that include 'linux',
-e.g. 'linux-gnueabi'
-
-Upstream-Status: Pending
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
----
- meson.build | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/meson.build b/meson.build
-index 4348f20..af5ed63 100644
---- a/meson.build
-+++ b/meson.build
-@@ -1574,7 +1574,7 @@ atomicdefine = '''
- # We know that we can always use real ("lock free") atomic operations with MSVC
- if cc.get_id() == 'msvc' or cc.links(atomictest, name : 'atomic ops')
- have_atomic_lock_free = true
-- if (host_system == 'android' or host_system == 'linux') and not cc.compiles(atomicdefine, name : 'atomic ops define')
-+ if (host_system == 'android' or host_system.contains('linux')) and not cc.compiles(atomicdefine, name : 'atomic ops define')
- # When building for armv5 on Linux, gcc provides
- # __sync_bool_compare_and_swap but doesn't define
- # __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch b/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
index 380bee086c..7e9925845b 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
+++ b/meta/recipes-core/glib-2.0/glib-2.0/relocate-modules.patch
@@ -1,4 +1,4 @@
-From 9c5d6e6ce5254a5f050bba2118a4a1807292c02a Mon Sep 17 00:00:00 2001
+From 6325bf4e8a2f569c55c8e1a36b9439d3566f98f6 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Fri, 11 Mar 2016 15:35:55 +0000
Subject: [PATCH] glib-2.0: relocate the GIO module directory for native builds
@@ -19,10 +19,10 @@ Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/gio/giomodule.c b/gio/giomodule.c
-index b92162d..fce9933 100644
+index 1007abd..5380982 100644
--- a/gio/giomodule.c
+++ b/gio/giomodule.c
-@@ -40,6 +40,8 @@
+@@ -44,6 +44,8 @@
#include "gnetworkmonitor.h"
#ifdef G_OS_WIN32
#include "gregistrysettingsbackend.h"
@@ -31,7 +31,7 @@ index b92162d..fce9933 100644
#endif
#include <glib/gstdio.h>
-@@ -1156,7 +1158,15 @@ get_gio_module_dir (void)
+@@ -1158,7 +1160,15 @@ get_gio_module_dir (void)
#endif
g_free (install_dir);
#else
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/uclibc_musl_translation.patch b/meta/recipes-core/glib-2.0/glib-2.0/uclibc_musl_translation.patch
deleted file mode 100644
index 7aa6217d69..0000000000
--- a/meta/recipes-core/glib-2.0/glib-2.0/uclibc_musl_translation.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-Fix DATADIRNAME on uclibc/Linux
-
-translation files are always installed under PREFIX/share/locale in uclibc
-based systems therefore lets set DATADIRNAME to "share".
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Upstream-Status: Pending
-Index: glib-2.46.1/m4macros/glib-gettext.m4
-===================================================================
---- glib-2.46.1.orig/m4macros/glib-gettext.m4
-+++ glib-2.46.1/m4macros/glib-gettext.m4
-@@ -243,6 +243,10 @@ msgstr ""
- CATOBJEXT=.mo
- DATADIRNAME=share
- ;;
-+ *-*-musl* | *-*-linux-uclibc*)
-+ CATOBJEXT=.gmo
-+ DATADIRNAME=share
-+ ;;
- *)
- CATOBJEXT=.mo
- DATADIRNAME=lib
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.60.6.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.62.1.bb
index a3c5a09d41..7897170a7d 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.60.6.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.62.1.bb
@@ -6,7 +6,7 @@ SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}"
SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://run-ptest \
- file://uclibc_musl_translation.patch \
+ file://0001-Fix-DATADIRNAME-on-uclibc-Linux.patch \
file://Enable-more-tests-while-cross-compiling.patch \
file://0001-Remove-the-warning-about-deprecated-paths-in-schemas.patch \
file://0001-Install-gio-querymodules-as-libexec_PROGRAM.patch \
@@ -14,12 +14,11 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://0010-Do-not-hardcode-python-path-into-various-tools.patch \
file://0001-Set-host_machine-correctly-when-building-with-mingw3.patch \
file://0001-Do-not-write-bindir-into-pkg-config-files.patch \
- file://0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch \
- file://0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch \
+ file://0001-meson-Run-atomics-test-on-clang-as-well.patch \
"
SRC_URI_append_class-native = " file://relocate-modules.patch"
SRC_URI_append_class-target = " file://glib-meson.cross"
-SRC_URI[md5sum] = "cd6865d8ce40db5e4c12b7d180953de6"
-SRC_URI[sha256sum] = "ff8fab8d8deaa4fd0536c90f90d9769a09071779c7e6183907f6855645bffb6c"
+SRC_URI[md5sum] = "64c14b4fe46c478992560c2f48a5b649"
+SRC_URI[sha256sum] = "3dd9024e1d0872a6da7ac509937ccf997161b11d7d35be337c7e829cbae0f9df"
diff --git a/meta/recipes-core/glib-2.0/glib.inc b/meta/recipes-core/glib-2.0/glib.inc
index 3ae22f5e80..8b95f21204 100644
--- a/meta/recipes-core/glib-2.0/glib.inc
+++ b/meta/recipes-core/glib-2.0/glib.inc
@@ -158,6 +158,8 @@ RDEPENDS_${PN}-ptest += "\
${PN}-locale-pl \
${PN}-locale-ru \
${PN}-locale-th \
+ python3-core \
+ python3-modules \
"
RDEPENDS_${PN}-ptest_append_libc-glibc = "\
diff --git a/meta/recipes-core/glib-networking/glib-networking_2.60.3.bb b/meta/recipes-core/glib-networking/glib-networking_2.62.1.bb
index d893b7a3a1..81d3fd43d9 100644
--- a/meta/recipes-core/glib-networking/glib-networking_2.60.3.bb
+++ b/meta/recipes-core/glib-networking/glib-networking_2.62.1.bb
@@ -9,8 +9,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
DEPENDS = "glib-2.0"
-SRC_URI[archive.md5sum] = "e8fd0462a82269fb4bbd6c07a1e7d0f4"
-SRC_URI[archive.sha256sum] = "d50183046a4ff955d8cc7e953067cdfc94f14dbfda3024bf377ff37a3121dcd5"
+SRC_URI[archive.md5sum] = "64ca1e1e43e623b916059585bf7e4758"
+SRC_URI[archive.sha256sum] = "3c55ae6771ad7a79fa606a834f4686ed555c2774ed6e9ece6f3c0f6a3dab7110"
PACKAGECONFIG ??= "gnutls"
@@ -30,3 +30,5 @@ FILES_${PN} += "\
"
FILES_${PN}-dev += "${libdir}/gio/modules/libgio*.la"
FILES_${PN}-staticdev += "${libdir}/gio/modules/libgio*.a"
+
+BBCLASSEXTEND = "native"
diff --git a/meta/recipes-core/glibc/glibc-package.inc b/meta/recipes-core/glibc/glibc-package.inc
index 9b1e7b7903..d7037c5cce 100644
--- a/meta/recipes-core/glibc/glibc-package.inc
+++ b/meta/recipes-core/glibc/glibc-package.inc
@@ -83,14 +83,6 @@ do_install_append () {
rm -f ${D}${infodir}/dir
fi
- if ! ${@bb.utils.contains('DISTRO_FEATURES', 'ldconfig', 'true', 'false', d)}; then
- # The distro doesn't want these files so let's not install them
- rm -f ${D}${sysconfdir}/ld.so.conf
- rm -f ${D}${base_sbindir}/ldconfig
- # This directory will be empty now so remove it too.
- rmdir ${D}${sysconfdir}
- fi
-
install -d ${D}${sysconfdir}/init.d
install -d ${D}${localstatedir}/db/nscd
install -m 0755 ${S}/nscd/nscd.init ${D}${sysconfdir}/init.d/nscd
@@ -102,6 +94,24 @@ do_install_append () {
install -d ${D}${systemd_unitdir}/system
install -m 0644 ${S}/nscd/nscd.service ${D}${systemd_unitdir}/system/
+ # The dynamic loader will have been installed into
+ # ${base_libdir}. However, if that isn't going to end up being
+ # available in the ABI-mandated location, then a symlink must
+ # be created.
+
+ if [ -n "${ARCH_DYNAMIC_LOADER}" -a ! -e "${D}${root_prefix}/lib/${ARCH_DYNAMIC_LOADER}" ]; then
+ install -d ${D}${root_prefix}/lib
+ ln -s ${@oe.path.relative('${root_prefix}/lib', '${base_libdir}')}/${ARCH_DYNAMIC_LOADER} \
+ ${D}${root_prefix}/lib/${ARCH_DYNAMIC_LOADER}
+ fi
+}
+
+do_install_append_class-target() {
+ if ! ${@bb.utils.contains('DISTRO_FEATURES', 'ldconfig', 'true', 'false', d)}; then
+ # The distro doesn't want these files so let's not install them
+ rm -f ${D}${sysconfdir}/ld.so.conf
+ rm -f ${D}${base_sbindir}/ldconfig
+ fi
if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
install -d ${D}${sysconfdir}/tmpfiles.d
echo "d /run/nscd 755 root root -" \
@@ -114,18 +124,7 @@ do_install_append () {
> ${D}${sysconfdir}/default/volatiles/98_nscd
fi
- # The dynamic loader will have been installed into
- # ${base_libdir}. However, if that isn't going to end up being
- # available in the ABI-mandated location, then a symlink must
- # be created.
-
- if [ -n "${ARCH_DYNAMIC_LOADER}" -a ! -e "${D}${root_prefix}/lib/${ARCH_DYNAMIC_LOADER}" ]; then
- install -d ${D}${root_prefix}/lib
- ln -s ${@oe.path.relative('${root_prefix}/lib', '${base_libdir}')}/${ARCH_DYNAMIC_LOADER} \
- ${D}${root_prefix}/lib/${ARCH_DYNAMIC_LOADER}
- fi
}
-
do_install_append_aarch64 () {
do_install_armmultilib
}
diff --git a/meta/recipes-core/glibc/glibc-testsuite_2.30.bb b/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
index 64fa8d87df..657fd4dbc1 100644
--- a/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
+++ b/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
@@ -8,6 +8,13 @@ PROVIDES = ""
# setup depends
INHIBIT_DEFAULT_DEPS = ""
+python () {
+ libc = d.getVar("PREFERRED_PROVIDER_virtual/libc")
+ libclocale = d.getVar("PREFERRED_PROVIDER_virtual/libc-locale")
+ if libc != "glibc" or libclocale != "glibc-locale":
+ raise bb.parse.SkipRecipe("glibc-testsuite requires that virtual/libc is glibc")
+}
+
DEPENDS += "glibc-locale libgcc gcc-runtime"
# remove the initial depends
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index 3f1b1c7a5e..1d74ee5198 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -24,7 +24,7 @@ IMAGE_FSTYPES = "wic.vmdk"
inherit core-image module-base setuptools3
-SRCREV ?= "bb59bcd016bdd815809ac10df45d61c364c46df0"
+SRCREV ?= "8181681b33da272fef83276104d5c7a93f84da46"
SRC_URI = "git://git.yoctoproject.org/poky \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/rootfs b/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
index 76fa84d354..748c9391c0 100644
--- a/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
@@ -27,8 +27,18 @@ rootfs_run() {
fi
if [ "`echo ${bootparam_root} | cut -c1-9`" = "PARTUUID=" ]; then
- root_uuid=`echo $bootparam_root | cut -c10-`
- bootparam_root="/dev/disk/by-partuuid/$root_uuid"
+ root_partuuid=`echo $bootparam_root | cut -c10-`
+ bootparam_root="/dev/disk/by-partuuid/$root_partuuid"
+ fi
+
+ if [ "`echo ${bootparam_root} | cut -c1-10`" = "PARTLABEL=" ]; then
+ root_partlabel=`echo $bootparam_root | cut -c11-`
+ bootparam_root="/dev/disk/by-partlabel/$root_partlabel"
+ fi
+
+ if [ "`echo ${bootparam_root} | cut -c1-10`" = "PARTLABEL=" ]; then
+ root_partlabel=`echo $bootparam_root | cut -c11-`
+ bootparam_root="/dev/disk/by-partlabel/$root_partlabel"
fi
if [ "`echo ${bootparam_root} | cut -c1-6`" = "LABEL=" ]; then
diff --git a/meta/recipes-core/initscripts/initscripts-1.0/arm/alignment.sh b/meta/recipes-core/initscripts/initscripts-1.0/alignment.sh
index b577b9a03a..b577b9a03a 100644
--- a/meta/recipes-core/initscripts/initscripts-1.0/arm/alignment.sh
+++ b/meta/recipes-core/initscripts/initscripts-1.0/alignment.sh
diff --git a/meta/recipes-core/initscripts/initscripts_1.0.bb b/meta/recipes-core/initscripts/initscripts_1.0.bb
index 91eea4b8c2..1a59b82fbf 100644
--- a/meta/recipes-core/initscripts/initscripts_1.0.bb
+++ b/meta/recipes-core/initscripts/initscripts_1.0.bb
@@ -40,6 +40,7 @@ SRC_URI = "file://functions \
S = "${WORKDIR}"
SRC_URI_append_arm = " file://alignment.sh"
+SRC_URI_append_armeb = " file://alignment.sh"
KERNEL_VERSION = ""
diff --git a/meta/recipes-core/musl/musl/0001-riscv-Define-sigcontext-again.patch b/meta/recipes-core/musl/musl/0001-riscv-Define-sigcontext-again.patch
new file mode 100644
index 0000000000..fcb324e15f
--- /dev/null
+++ b/meta/recipes-core/musl/musl/0001-riscv-Define-sigcontext-again.patch
@@ -0,0 +1,48 @@
+commit a0993f8f0f161423ecdcb754f282ffd2fe47a7b5
+Author: Rich Felker <dalias@aerifal.cx>
+Date: Wed Oct 2 09:28:03 2019 -0400
+
+ reintroduce riscv64 struct sigcontext
+
+ commit ab3eb89a8b83353cdaab12ed017a67a7730f90e9 removed it as part of
+ correcting the mcontext_t definition, but there is still code using
+ struct sigcontext and expecting the member names present in it, most
+ notably libgcc_eh. almost all such usage is incorrect, but bring back
+ struct sigcontext at least for now so as not to introduce regressions.
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+diff --git a/arch/riscv64/bits/signal.h b/arch/riscv64/bits/signal.h
+index 03fe48c1..2ff4be30 100644
+--- a/arch/riscv64/bits/signal.h
++++ b/arch/riscv64/bits/signal.h
+@@ -6,12 +6,6 @@
+ # define SIGSTKSZ 8192
+ #endif
+
+-#if defined(_GNU_SOURCE) || defined(_BSD_SOURCE)
+-typedef unsigned long greg_t;
+-typedef unsigned long gregset_t[32];
+-typedef union __riscv_mc_fp_state fpregset_t;
+-#endif
+-
+ typedef unsigned long __riscv_mc_gp_state[32];
+
+ struct __riscv_mc_f_ext_state {
+@@ -41,6 +35,16 @@ typedef struct mcontext_t {
+ union __riscv_mc_fp_state __fpregs;
+ } mcontext_t;
+
++#if defined(_GNU_SOURCE) || defined(_BSD_SOURCE)
++typedef unsigned long greg_t;
++typedef unsigned long gregset_t[32];
++typedef union __riscv_mc_fp_state fpregset_t;
++struct sigcontext {
++ gregset_t gregs;
++ fpregset_t fpregs;
++};
++#endif
++
+ struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
diff --git a/meta/recipes-core/musl/musl_git.bb b/meta/recipes-core/musl/musl_git.bb
index 9341bf5d58..aacff79f7c 100644
--- a/meta/recipes-core/musl/musl_git.bb
+++ b/meta/recipes-core/musl/musl_git.bb
@@ -4,7 +4,7 @@
require musl.inc
inherit linuxloader
-SRCREV = "6ad514e4e278f0c3b18eb2db1d45638c9af1c07f"
+SRCREV = "2c2477da9a553c0b9b2fa18073a5dcdbe6d395af"
BASEVER = "1.1.23"
@@ -15,6 +15,7 @@ PV = "${BASEVER}+git${SRCPV}"
SRC_URI = "git://git.musl-libc.org/musl \
file://0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch \
file://0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch \
+ file://0001-riscv-Define-sigcontext-again.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/ncurses/files/0001-ncurses-selective-backport-of-20191012-patch.patch b/meta/recipes-core/ncurses/files/0001-ncurses-selective-backport-of-20191012-patch.patch
new file mode 100644
index 0000000000..7870c4ba32
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/0001-ncurses-selective-backport-of-20191012-patch.patch
@@ -0,0 +1,169 @@
+From 064b77f173337aa790f1cec0d741bfbc61a33d31 Mon Sep 17 00:00:00 2001
+From: Trevor Gamblin <trevor.gamblin@windriver.com>
+Date: Fri, 18 Oct 2019 09:57:43 -0400
+Subject: [PATCH] ncurses: selective backport of 20191012 patch
+
+Upstream-Status: Backport [https://salsa.debian.org/debian/ncurses/commit/243908b1e3d81]
+
+Contents of the upstream patch that are not applied to comp_hash.c,
+parse_entry.c, or dump_entry.c have been omitted.
+
+CVE: CVE-2019-17594
+CVE: CVE-2019-17595
+
+Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
+
+---
+ ncurses/tinfo/comp_hash.c | 14 ++++++++++----
+ ncurses/tinfo/parse_entry.c | 32 ++++++++++++++++----------------
+ progs/dump_entry.c | 7 ++++---
+ 3 files changed, 30 insertions(+), 23 deletions(-)
+
+diff --git a/ncurses/tinfo/comp_hash.c b/ncurses/tinfo/comp_hash.c
+index 21f165ca..a62d38f9 100644
+--- a/ncurses/tinfo/comp_hash.c
++++ b/ncurses/tinfo/comp_hash.c
+@@ -44,7 +44,7 @@
+ #include <tic.h>
+ #include <hashsize.h>
+
+-MODULE_ID("$Id: comp_hash.c,v 1.49 2019/03/10 00:06:48 tom Exp $")
++MODULE_ID("$Id: comp_hash.c,v 1.51 2019/10/12 16:32:13 tom Exp $")
+
+ /*
+ * Finds the entry for the given string in the hash table if present.
+@@ -63,7 +63,9 @@ _nc_find_entry(const char *string,
+
+ hashvalue = data->hash_of(string);
+
+- if (data->table_data[hashvalue] >= 0) {
++ if (hashvalue >= 0
++ && (unsigned) hashvalue < data->table_size
++ && data->table_data[hashvalue] >= 0) {
+
+ real_table = _nc_get_table(termcap);
+ ptr = real_table + data->table_data[hashvalue];
+@@ -96,7 +98,9 @@ _nc_find_type_entry(const char *string,
+ const HashData *data = _nc_get_hash_info(termcap);
+ int hashvalue = data->hash_of(string);
+
+- if (data->table_data[hashvalue] >= 0) {
++ if (hashvalue >= 0
++ && (unsigned) hashvalue < data->table_size
++ && data->table_data[hashvalue] >= 0) {
+ const struct name_table_entry *const table = _nc_get_table(termcap);
+
+ ptr = table + data->table_data[hashvalue];
+@@ -124,7 +128,9 @@ _nc_find_user_entry(const char *string)
+
+ hashvalue = data->hash_of(string);
+
+- if (data->table_data[hashvalue] >= 0) {
++ if (hashvalue >= 0
++ && (unsigned) hashvalue < data->table_size
++ && data->table_data[hashvalue] >= 0) {
+
+ real_table = _nc_get_userdefs_table();
+ ptr = real_table + data->table_data[hashvalue];
+diff --git a/ncurses/tinfo/parse_entry.c b/ncurses/tinfo/parse_entry.c
+index f8cca8b5..064376c5 100644
+--- a/ncurses/tinfo/parse_entry.c
++++ b/ncurses/tinfo/parse_entry.c
+@@ -47,7 +47,7 @@
+ #include <ctype.h>
+ #include <tic.h>
+
+-MODULE_ID("$Id: parse_entry.c,v 1.97 2019/08/03 23:10:38 tom Exp $")
++MODULE_ID("$Id: parse_entry.c,v 1.98 2019/10/12 00:50:31 tom Exp $")
+
+ #ifdef LINT
+ static short const parametrized[] =
+@@ -654,12 +654,12 @@ _nc_capcmp(const char *s, const char *t)
+ }
+
+ static void
+-append_acs0(string_desc * dst, int code, int src)
++append_acs0(string_desc * dst, int code, char *src, size_t off)
+ {
+- if (src != 0) {
++ if (src != 0 && off < strlen(src)) {
+ char temp[3];
+ temp[0] = (char) code;
+- temp[1] = (char) src;
++ temp[1] = src[off];
+ temp[2] = 0;
+ _nc_safe_strcat(dst, temp);
+ }
+@@ -669,7 +669,7 @@ static void
+ append_acs(string_desc * dst, int code, char *src)
+ {
+ if (VALID_STRING(src) && strlen(src) == 1) {
+- append_acs0(dst, code, *src);
++ append_acs0(dst, code, src, 0);
+ }
+ }
+
+@@ -1038,17 +1038,17 @@ postprocess_terminfo(TERMTYPE2 *tp)
+ _nc_str_init(&result, buf2, sizeof(buf2));
+ _nc_safe_strcat(&result, acs_chars);
+
+- append_acs0(&result, 'l', box_chars_1[0]); /* ACS_ULCORNER */
+- append_acs0(&result, 'q', box_chars_1[1]); /* ACS_HLINE */
+- append_acs0(&result, 'k', box_chars_1[2]); /* ACS_URCORNER */
+- append_acs0(&result, 'x', box_chars_1[3]); /* ACS_VLINE */
+- append_acs0(&result, 'j', box_chars_1[4]); /* ACS_LRCORNER */
+- append_acs0(&result, 'm', box_chars_1[5]); /* ACS_LLCORNER */
+- append_acs0(&result, 'w', box_chars_1[6]); /* ACS_TTEE */
+- append_acs0(&result, 'u', box_chars_1[7]); /* ACS_RTEE */
+- append_acs0(&result, 'v', box_chars_1[8]); /* ACS_BTEE */
+- append_acs0(&result, 't', box_chars_1[9]); /* ACS_LTEE */
+- append_acs0(&result, 'n', box_chars_1[10]); /* ACS_PLUS */
++ append_acs0(&result, 'l', box_chars_1, 0); /* ACS_ULCORNER */
++ append_acs0(&result, 'q', box_chars_1, 1); /* ACS_HLINE */
++ append_acs0(&result, 'k', box_chars_1, 2); /* ACS_URCORNER */
++ append_acs0(&result, 'x', box_chars_1, 3); /* ACS_VLINE */
++ append_acs0(&result, 'j', box_chars_1, 4); /* ACS_LRCORNER */
++ append_acs0(&result, 'm', box_chars_1, 5); /* ACS_LLCORNER */
++ append_acs0(&result, 'w', box_chars_1, 6); /* ACS_TTEE */
++ append_acs0(&result, 'u', box_chars_1, 7); /* ACS_RTEE */
++ append_acs0(&result, 'v', box_chars_1, 8); /* ACS_BTEE */
++ append_acs0(&result, 't', box_chars_1, 9); /* ACS_LTEE */
++ append_acs0(&result, 'n', box_chars_1, 10); /* ACS_PLUS */
+
+ if (buf2[0]) {
+ acs_chars = _nc_save_str(buf2);
+diff --git a/progs/dump_entry.c b/progs/dump_entry.c
+index d0e420ec..8a47084a 100644
+--- a/progs/dump_entry.c
++++ b/progs/dump_entry.c
+@@ -39,7 +39,7 @@
+ #include "termsort.c" /* this C file is generated */
+ #include <parametrized.h> /* so is this */
+
+-MODULE_ID("$Id: dump_entry.c,v 1.173 2019/05/11 21:02:24 tom Exp $")
++MODULE_ID("$Id: dump_entry.c,v 1.175 2019/10/12 15:59:07 tom Exp $")
+
+ #define DISCARD(string) string = ABSENT_STRING
+ #define PRINTF (void) printf
+@@ -1136,7 +1136,8 @@ fmt_entry(TERMTYPE2 *tterm,
+ *d++ = '\\';
+ *d = ':';
+ } else if (*d == '\\') {
+- *++d = *s++;
++ if ((*++d = *s++) == '\0')
++ break;
+ }
+ d++;
+ *d = '\0';
+@@ -1396,7 +1397,7 @@ one_one_mapping(const char *mapping)
+
+ if (VALID_STRING(mapping)) {
+ int n = 0;
+- while (mapping[n] != '\0') {
++ while (mapping[n] != '\0' && mapping[n + 1] != '\0') {
+ if (isLine(mapping[n]) &&
+ mapping[n] != mapping[n + 1]) {
+ result = FALSE;
+--
+2.17.1
+
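
The hunks above all apply the same defensive pattern: validate the computed hash index against the table bounds before using it to index table_data. A minimal sketch of that guard, using a hypothetical hash_table struct and lookup() helper rather than the real ncurses HashData type:

    #include <stddef.h>

    struct hash_table {
        size_t table_size;        /* number of slots in table_data */
        const short *table_data;  /* a negative value marks an empty slot */
        int (*hash_of)(const char *);
    };

    /* Return the slot value for 'key', or -1 when the hash falls outside the
     * table or the slot is empty -- the same guard the backported hunks add. */
    static int lookup(const struct hash_table *t, const char *key)
    {
        int hashvalue = t->hash_of(key);

        if (hashvalue >= 0
            && (size_t) hashvalue < t->table_size
            && t->table_data[hashvalue] >= 0)
            return t->table_data[hashvalue];

        return -1;  /* reject instead of reading past the end of the table */
    }
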
diff --git a/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb b/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
index a44d78e4fe..e638a3737c 100644
--- a/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
@@ -3,6 +3,7 @@ require ncurses.inc
SRC_URI += "file://0001-tic-hang.patch \
file://0002-configure-reproducible.patch \
file://config.cache \
+ file://0001-ncurses-selective-backport-of-20191012-patch.patch \
"
# commit id corresponds to the revision in package version
SRCREV = "3c9b2677c96c645496997321bf2fe465a5e7e21f"
diff --git a/meta/recipes-core/ovmf/ovmf_git.bb b/meta/recipes-core/ovmf/ovmf_git.bb
index b569b593fc..3b5a05e51e 100644
--- a/meta/recipes-core/ovmf/ovmf_git.bb
+++ b/meta/recipes-core/ovmf/ovmf_git.bb
@@ -2,7 +2,7 @@ SUMMARY = "OVMF - UEFI firmware for Qemu and KVM"
DESCRIPTION = "OVMF is an EDK II based project to enable UEFI support for \
Virtual Machines. OVMF contains sample UEFI firmware for QEMU and KVM"
HOMEPAGE = "https://github.com/tianocore/tianocore.github.io/wiki/OVMF"
-LICENSE = "BSD"
+LICENSE = "BSD-2-Clause"
LICENSE_class-target = "${@bb.utils.contains('PACKAGECONFIG', 'secureboot', 'BSD & OpenSSL', 'BSD', d)}"
LIC_FILES_CHKSUM = "file://OvmfPkg/License.txt;md5=06357ddc23f46577c2aeaeaf7b776d65"
diff --git a/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb b/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
index f5b2d69cef..2a54f1ca3e 100644
--- a/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
@@ -1,6 +1,8 @@
SUMMARY = "Target packages for the standalone SDK"
PR = "r8"
+PACKAGE_ARCH = "${TUNE_PKGARCH}"
+
inherit packagegroup
RDEPENDS_${PN} = "\
diff --git a/meta/recipes-core/packagegroups/packagegroup-core-tools-debug.bb b/meta/recipes-core/packagegroups/packagegroup-core-tools-debug.bb
index 9fc2b0ef4d..81fbdf4608 100644
--- a/meta/recipes-core/packagegroups/packagegroup-core-tools-debug.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-core-tools-debug.bb
@@ -4,6 +4,8 @@
SUMMARY = "Debugging tools"
+PACKAGE_ARCH = "${TUNE_PKGARCH}"
+
inherit packagegroup
PR = "r3"
diff --git a/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb b/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
index 9b0ae0d1c2..ee9d0636f2 100644
--- a/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-self-hosted.bb
@@ -6,6 +6,8 @@ SUMMARY = "Self-hosting"
DESCRIPTION = "Packages required to run the build system"
PR = "r13"
+PACKAGE_ARCH = "${TUNE_PKGARCH}"
+
inherit packagegroup distro_features_check
# rdepends on libx11-dev
REQUIRED_DISTRO_FEATURES = "x11"
diff --git a/meta/recipes-core/psplash/files/psplash-init b/meta/recipes-core/psplash/files/psplash-init
index dcb751907f..4bee866b0d 100755
--- a/meta/recipes-core/psplash/files/psplash-init
+++ b/meta/recipes-core/psplash/files/psplash-init
@@ -25,7 +25,7 @@ done
export TMPDIR=/mnt/.psplash
[ -d $TMPDIR ] || mkdir -p $TMPDIR
-if [ ! mountpoint -q $TMPDIR ]; then
+if ! mountpoint -q $TMPDIR; then
mount tmpfs -t tmpfs $TMPDIR -o,size=40k
fi
diff --git a/meta/recipes-core/readline/readline-8.0/rl-native.map b/meta/recipes-core/readline/readline-8.0/rl-native.map
new file mode 100644
index 0000000000..5e7d49cdd2
--- /dev/null
+++ b/meta/recipes-core/readline/readline-8.0/rl-native.map
@@ -0,0 +1,12 @@
+READLINE_6.3 {
+ rl_change_environment;
+ rl_clear_history;
+ rl_executing_key;
+ rl_executing_keyseq;
+ rl_filename_stat_hook;
+ rl_history_substr_search_backward;
+ rl_history_substr_search_forward;
+ rl_input_available_hook;
+ rl_print_last_kbd_macro;
+ rl_signal_event_hook;
+};
diff --git a/meta/recipes-core/readline/readline.inc b/meta/recipes-core/readline/readline.inc
index e9665228dc..07f54a76f1 100644
--- a/meta/recipes-core/readline/readline.inc
+++ b/meta/recipes-core/readline/readline.inc
@@ -43,3 +43,8 @@ do_install_append () {
BBCLASSEXTEND = "native nativesdk"
CONFFILES_${PN} += "${sysconfdir}/inputrc"
+
+# OpenSuse injects symbol versions into libreadline, leading to conflicts between our native build and theirs;
+# see their spec file for where this is injected. The extra versioning is harmless, so we simply do the same.
+SRC_URI_append_class-native = " file://rl-native.map"
+LDFLAGS_append_class-native = " -Wl,--version-script=${WORKDIR}/rl-native.map"
\ No newline at end of file
diff --git a/meta/recipes-core/systemd/systemd-boot_242.bb b/meta/recipes-core/systemd/systemd-boot_243.bb
index 56a25c35ba..515abc289b 100644
--- a/meta/recipes-core/systemd/systemd-boot_242.bb
+++ b/meta/recipes-core/systemd/systemd-boot_243.bb
@@ -1,6 +1,8 @@
require systemd.inc
FILESEXTRAPATHS =. "${FILE_DIRNAME}/systemd:"
+require conf/image-uefi.conf
+
DEPENDS = "intltool-native libcap util-linux gnu-efi gperf-native"
# NOTE: These three patches are in theory not needed, but we haven't
@@ -33,16 +35,13 @@ python __anonymous () {
import re
target = d.getVar('TARGET_ARCH')
prefix = "" if d.getVar('EFI_PROVIDER') == "systemd-boot" else "systemd-"
- if target == "x86_64":
- systemdimage = prefix + "bootx64.efi"
- else:
- systemdimage = prefix + "bootia32.efi"
+ systemdimage = prefix + d.getVar("EFI_BOOT_IMAGE")
d.setVar("SYSTEMD_BOOT_IMAGE", systemdimage)
prefix = "systemd-" if prefix == "" else ""
d.setVar("SYSTEMD_BOOT_IMAGE_PREFIX", prefix)
}
-FILES_${PN} = "/boot/EFI/BOOT/${SYSTEMD_BOOT_IMAGE}"
+FILES_${PN} = "${EFI_FILES_PATH}/${SYSTEMD_BOOT_IMAGE}"
RDEPENDS_${PN} += "virtual/systemd-bootconf"
@@ -61,10 +60,8 @@ do_compile() {
}
do_install() {
- install -d ${D}/boot
- install -d ${D}/boot/EFI
- install -d ${D}/boot/EFI/BOOT
- install ${B}/src/boot/efi/systemd-boot*.efi ${D}/boot/EFI/BOOT/${SYSTEMD_BOOT_IMAGE}
+ install -d ${D}${EFI_FILES_PATH}
+ install ${B}/src/boot/efi/systemd-boot*.efi ${D}${EFI_FILES_PATH}/${SYSTEMD_BOOT_IMAGE}
}
do_deploy () {
diff --git a/meta/recipes-core/systemd/systemd-conf_242.bb b/meta/recipes-core/systemd/systemd-conf_243.bb
index d9ec023bfd..d9ec023bfd 100644
--- a/meta/recipes-core/systemd/systemd-conf_242.bb
+++ b/meta/recipes-core/systemd/systemd-conf_243.bb
diff --git a/meta/recipes-core/systemd/systemd-systemctl/systemctl b/meta/recipes-core/systemd/systemd-systemctl/systemctl
index 8837f54e16..ebac863739 100755
--- a/meta/recipes-core/systemd/systemd-systemctl/systemctl
+++ b/meta/recipes-core/systemd/systemd-systemctl/systemctl
@@ -57,7 +57,7 @@ class SystemdFile():
if skip_re.match(line):
continue
- line = line.rstrip("\n")
+ line = line.strip()
m = section_re.match(line)
if m:
if m.group('section') not in self.sections:
diff --git a/meta/recipes-core/systemd/systemd.inc b/meta/recipes-core/systemd/systemd.inc
index 1912715617..7f3a59c208 100644
--- a/meta/recipes-core/systemd/systemd.inc
+++ b/meta/recipes-core/systemd/systemd.inc
@@ -14,8 +14,8 @@ LICENSE = "GPLv2 & LGPLv2.1"
LIC_FILES_CHKSUM = "file://LICENSE.GPL2;md5=751419260aa954499f7abaabaa882bbe \
file://LICENSE.LGPL2.1;md5=4fbd65380cdd255951079008b364516c"
-SRCREV = "f875dced33462641e1fb7875d2f9a8cd8e8c2fcc"
-SRCBRANCH = "v242-stable"
+SRCREV = "efb536d0cbe2e58f80e501d19999928c75e08f6a"
+SRCBRANCH = "v243-stable"
SRC_URI = "git://github.com/systemd/systemd-stable.git;protocol=git;branch=${SRCBRANCH}"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/systemd/systemd/0001-Replace-the-legacy-ULONG_LONG_MAX-with-the-C99-ULLON.patch b/meta/recipes-core/systemd/systemd/0001-Replace-the-legacy-ULONG_LONG_MAX-with-the-C99-ULLON.patch
deleted file mode 100644
index 6f192a2ad2..0000000000
--- a/meta/recipes-core/systemd/systemd/0001-Replace-the-legacy-ULONG_LONG_MAX-with-the-C99-ULLON.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From f491dfdc88ddbba36a24a75b2063aff027461b83 Mon Sep 17 00:00:00 2001
-From: Adrian Bunk <bunk@stusta.de>
-Date: Thu, 16 May 2019 22:20:07 +0300
-Subject: Replace the legacy ULONG_LONG_MAX with the C99 ULLONG_MAX
-
-Upstream-Status: Backport
-Signed-off-by: Adrian Bunk <bunk@stusta.de>
----
- src/journal-remote/journal-remote-main.c | 4 ++--
- src/shutdown/shutdown.c | 2 +-
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/src/journal-remote/journal-remote-main.c b/src/journal-remote/journal-remote-main.c
-index 2321a91e7b..d32c51c3ce 100644
---- a/src/journal-remote/journal-remote-main.c
-+++ b/src/journal-remote/journal-remote-main.c
-@@ -528,7 +528,7 @@ static int dispatch_http_event(sd_event_source *event,
- void *userdata) {
- MHDDaemonWrapper *d = userdata;
- int r;
-- MHD_UNSIGNED_LONG_LONG timeout = ULONG_LONG_MAX;
-+ MHD_UNSIGNED_LONG_LONG timeout = ULLONG_MAX;
-
- assert(d);
-
-@@ -538,7 +538,7 @@ static int dispatch_http_event(sd_event_source *event,
- return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
- "MHD_run failed!");
- if (MHD_get_timeout(d->daemon, &timeout) == MHD_NO)
-- timeout = ULONG_LONG_MAX;
-+ timeout = ULLONG_MAX;
-
- r = sd_event_source_set_time(d->timer_event, timeout);
- if (r < 0) {
-diff --git a/src/shutdown/shutdown.c b/src/shutdown/shutdown.c
-index 35b2c2aa46..9623fe9ea2 100644
---- a/src/shutdown/shutdown.c
-+++ b/src/shutdown/shutdown.c
-@@ -212,7 +212,7 @@ static int sync_making_progress(unsigned long long *prev_dirty) {
- }
-
- static void sync_with_progress(void) {
-- unsigned long long dirty = ULONG_LONG_MAX;
-+ unsigned long long dirty = ULLONG_MAX;
- unsigned checks;
- pid_t pid;
- int r;
---
-2.20.1
-
diff --git a/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch b/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch
index 00670ca2b1..73e65ff798 100644
--- a/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch
+++ b/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch
@@ -16,6 +16,8 @@ Upstream-Status: Denied
Signed-off-by: Ross Burton <ross.burton@intel.com>
Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+[rebased for systemd 243]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
---
units/meson.build | 6 ++----
units/proc-sys-fs-binfmt_misc.automount | 3 +++
@@ -23,10 +25,10 @@ Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/units/meson.build b/units/meson.build
-index d695084..7b1f14a 100644
+index e1ee9f86c3..6bb7771b36 100644
--- a/units/meson.build
+++ b/units/meson.build
-@@ -47,8 +47,7 @@ units = [
+@@ -46,8 +46,7 @@ units = [
['poweroff.target', '',
'runlevel0.target'],
['printer.target', ''],
@@ -36,7 +38,7 @@ index d695084..7b1f14a 100644
['proc-sys-fs-binfmt_misc.mount', 'ENABLE_BINFMT'],
['reboot.target', '',
'runlevel6.target ctrl-alt-del.target'],
-@@ -134,8 +133,7 @@ in_units = [
+@@ -130,8 +129,7 @@ in_units = [
['systemd-ask-password-console.service', ''],
['systemd-ask-password-wall.service', ''],
['systemd-backlight@.service', 'ENABLE_BACKLIGHT'],
@@ -45,9 +47,9 @@ index d695084..7b1f14a 100644
+ ['systemd-binfmt.service', 'ENABLE_BINFMT'],
['systemd-bless-boot.service', 'ENABLE_EFI HAVE_BLKID'],
['systemd-boot-check-no-failures.service', ''],
- ['systemd-coredump@.service', 'ENABLE_COREDUMP'],
+ ['systemd-boot-system-token.service', 'ENABLE_EFI',
diff --git a/units/proc-sys-fs-binfmt_misc.automount b/units/proc-sys-fs-binfmt_misc.automount
-index 30a6bc9..4231f3b 100644
+index 30a6bc9918..4231f3b70f 100644
--- a/units/proc-sys-fs-binfmt_misc.automount
+++ b/units/proc-sys-fs-binfmt_misc.automount
@@ -18,3 +18,6 @@ ConditionPathIsReadWrite=/proc/sys/
@@ -58,7 +60,7 @@ index 30a6bc9..4231f3b 100644
+[Install]
+WantedBy=sysinit.target
diff --git a/units/systemd-binfmt.service.in b/units/systemd-binfmt.service.in
-index e940c7c..6be7f5c 100644
+index e940c7c9ad..6be7f5cc9b 100644
--- a/units/systemd-binfmt.service.in
+++ b/units/systemd-binfmt.service.in
@@ -14,6 +14,7 @@ Documentation=https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.htm
@@ -76,6 +78,3 @@ index e940c7c..6be7f5c 100644
+
+[Install]
+WantedBy=sysinit.target
---
-2.7.4
-
diff --git a/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch b/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch
index 68ca604d6d..2f4daf8665 100644
--- a/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch
+++ b/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch
@@ -16,6 +16,8 @@ Upstream-Status: Inappropriate [musl]
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
[Rebased for v242]
Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
+[rebased for systemd 243]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
---
src/basic/cgroup-util.c | 14 +++++++-------
src/basic/procfs-util.c | 4 ++--
@@ -30,17 +32,17 @@ Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
src/nspawn/nspawn-cgroup.c | 2 +-
src/nspawn/nspawn.c | 6 +++---
src/shared/sysctl-util.c | 2 +-
- src/sleep/sleep.c | 12 ++++++------
+ src/sleep/sleep.c | 10 +++++-----
src/udev/udevadm-trigger.c | 2 +-
- src/udev/udevd.c | 6 +++---
+ src/udev/udevd.c | 2 +-
src/vconsole/vconsole-setup.c | 2 +-
- 17 files changed, 39 insertions(+), 39 deletions(-)
+ 17 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/src/basic/cgroup-util.c b/src/basic/cgroup-util.c
-index fc28109..44fe985 100644
+index 7b5839ccd6..18f6e8ffc8 100644
--- a/src/basic/cgroup-util.c
+++ b/src/basic/cgroup-util.c
-@@ -822,7 +822,7 @@ int cg_attach(const char *controller, const char *path, pid_t pid) {
+@@ -860,7 +860,7 @@ int cg_attach(const char *controller, const char *path, pid_t pid) {
xsprintf(c, PID_FMT "\n", pid);
@@ -49,7 +51,7 @@ index fc28109..44fe985 100644
if (r < 0)
return r;
-@@ -1104,7 +1104,7 @@ int cg_install_release_agent(const char *controller, const char *agent) {
+@@ -1142,7 +1142,7 @@ int cg_install_release_agent(const char *controller, const char *agent) {
sc = strstrip(contents);
if (isempty(sc)) {
@@ -58,7 +60,7 @@ index fc28109..44fe985 100644
if (r < 0)
return r;
} else if (!path_equal(sc, agent))
-@@ -1122,7 +1122,7 @@ int cg_install_release_agent(const char *controller, const char *agent) {
+@@ -1160,7 +1160,7 @@ int cg_install_release_agent(const char *controller, const char *agent) {
sc = strstrip(contents);
if (streq(sc, "0")) {
@@ -67,7 +69,7 @@ index fc28109..44fe985 100644
if (r < 0)
return r;
-@@ -1149,7 +1149,7 @@ int cg_uninstall_release_agent(const char *controller) {
+@@ -1187,7 +1187,7 @@ int cg_uninstall_release_agent(const char *controller) {
if (r < 0)
return r;
@@ -76,7 +78,7 @@ index fc28109..44fe985 100644
if (r < 0)
return r;
-@@ -1159,7 +1159,7 @@ int cg_uninstall_release_agent(const char *controller) {
+@@ -1197,7 +1197,7 @@ int cg_uninstall_release_agent(const char *controller) {
if (r < 0)
return r;
@@ -85,7 +87,7 @@ index fc28109..44fe985 100644
if (r < 0)
return r;
-@@ -2016,7 +2016,7 @@ int cg_set_attribute(const char *controller, const char *path, const char *attri
+@@ -2053,7 +2053,7 @@ int cg_set_attribute(const char *controller, const char *path, const char *attri
if (r < 0)
return r;
@@ -94,7 +96,7 @@ index fc28109..44fe985 100644
}
int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret) {
-@@ -2664,7 +2664,7 @@ int cg_enable_everywhere(
+@@ -2697,7 +2697,7 @@ int cg_enable_everywhere(
return log_debug_errno(errno, "Failed to open cgroup.subtree_control file of %s: %m", p);
}
@@ -104,10 +106,10 @@ index fc28109..44fe985 100644
log_debug_errno(r, "Failed to %s controller %s for %s (%s): %m",
FLAGS_SET(mask, bit) ? "enable" : "disable", n, p, fs);
diff --git a/src/basic/procfs-util.c b/src/basic/procfs-util.c
-index 7aaf95b..25fc3de 100644
+index 42ce53d5aa..57512532a6 100644
--- a/src/basic/procfs-util.c
+++ b/src/basic/procfs-util.c
-@@ -85,13 +85,13 @@ int procfs_tasks_set_limit(uint64_t limit) {
+@@ -86,13 +86,13 @@ int procfs_tasks_set_limit(uint64_t limit) {
* decrease it, as threads-max is the much more relevant sysctl. */
if (limit > pid_max-1) {
sprintf(buffer, "%" PRIu64, limit+1); /* Add one, since PID 0 is not a valid PID */
@@ -124,7 +126,7 @@ index 7aaf95b..25fc3de 100644
uint64_t threads_max;
diff --git a/src/basic/smack-util.c b/src/basic/smack-util.c
-index 123d00e..e7ea78f 100644
+index 123d00e13e..e7ea78f349 100644
--- a/src/basic/smack-util.c
+++ b/src/basic/smack-util.c
@@ -115,7 +115,7 @@ int mac_smack_apply_pid(pid_t pid, const char *label) {
@@ -137,7 +139,7 @@ index 123d00e..e7ea78f 100644
return r;
diff --git a/src/basic/util.c b/src/basic/util.c
-index 93d610b..97dca64 100644
+index 93d610bc98..97dca64f73 100644
--- a/src/basic/util.c
+++ b/src/basic/util.c
@@ -294,7 +294,7 @@ void disable_coredumps(void) {
@@ -150,7 +152,7 @@ index 93d610b..97dca64 100644
log_debug_errno(r, "Failed to turn off coredumps, ignoring: %m");
}
diff --git a/src/binfmt/binfmt.c b/src/binfmt/binfmt.c
-index 66e2f01..5daa43b 100644
+index aa9d811f2e..8c7f2dae7a 100644
--- a/src/binfmt/binfmt.c
+++ b/src/binfmt/binfmt.c
@@ -48,7 +48,7 @@ static int delete_rule(const char *rule) {
@@ -175,16 +177,16 @@ index 66e2f01..5daa43b 100644
}
/* Flush out all rules */
-- write_string_file("/proc/sys/fs/binfmt_misc/status", "-1", WRITE_STRING_FILE_DISABLE_BUFFER);
-+ write_string_file("/proc/sys/fs/binfmt_misc/status", "-1", 0);
+- (void) write_string_file("/proc/sys/fs/binfmt_misc/status", "-1", WRITE_STRING_FILE_DISABLE_BUFFER);
++ (void) write_string_file("/proc/sys/fs/binfmt_misc/status", "-1", 0);
STRV_FOREACH(f, files) {
k = apply_file(*f, true);
diff --git a/src/core/main.c b/src/core/main.c
-index 46db471..726ccaf 100644
+index bcce7178a8..4199cedab9 100644
--- a/src/core/main.c
+++ b/src/core/main.c
-@@ -1469,7 +1469,7 @@ static int bump_unix_max_dgram_qlen(void) {
+@@ -1285,7 +1285,7 @@ static int bump_unix_max_dgram_qlen(void) {
if (v >= DEFAULT_UNIX_MAX_DGRAM_QLEN)
return 0;
@@ -193,7 +195,7 @@ index 46db471..726ccaf 100644
if (r < 0)
return log_full_errno(IN_SET(r, -EROFS, -EPERM, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
"Failed to bump AF_UNIX datagram queue length, ignoring: %m");
-@@ -1684,7 +1684,7 @@ static void initialize_core_pattern(bool skip_setup) {
+@@ -1509,7 +1509,7 @@ static void initialize_core_pattern(bool skip_setup) {
if (getpid_cached() != 1)
return;
@@ -203,10 +205,10 @@ index 46db471..726ccaf 100644
log_warning_errno(r, "Failed to write '%s' to /proc/sys/kernel/core_pattern, ignoring: %m", arg_early_core_pattern);
}
diff --git a/src/core/smack-setup.c b/src/core/smack-setup.c
-index cd7fb01..077e861 100644
+index b95e6239d4..fdbdaaaccb 100644
--- a/src/core/smack-setup.c
+++ b/src/core/smack-setup.c
-@@ -351,17 +351,17 @@ int mac_smack_setup(bool *loaded_policy) {
+@@ -325,17 +325,17 @@ int mac_smack_setup(bool *loaded_policy) {
}
#ifdef SMACK_RUN_LABEL
@@ -229,7 +231,7 @@ index cd7fb01..077e861 100644
log_warning_errno(r, "Failed to set SMACK netlabel rule \"127.0.0.1 -CIPSO\": %m");
#endif
diff --git a/src/hibernate-resume/hibernate-resume.c b/src/hibernate-resume/hibernate-resume.c
-index 17e7cd1..87a7667 100644
+index 17e7cd1a00..87a7667716 100644
--- a/src/hibernate-resume/hibernate-resume.c
+++ b/src/hibernate-resume/hibernate-resume.c
@@ -45,7 +45,7 @@ int main(int argc, char *argv[]) {
@@ -242,10 +244,10 @@ index 17e7cd1..87a7667 100644
log_error_errno(r, "Failed to write '%s' to /sys/power/resume: %m", major_minor);
return EXIT_FAILURE;
diff --git a/src/libsystemd/sd-device/sd-device.c b/src/libsystemd/sd-device/sd-device.c
-index c2315c0..00f81b4 100644
+index c4a7f2f3d3..bcac758284 100644
--- a/src/libsystemd/sd-device/sd-device.c
+++ b/src/libsystemd/sd-device/sd-device.c
-@@ -1852,7 +1852,7 @@ _public_ int sd_device_set_sysattr_value(sd_device *device, const char *sysattr,
+@@ -1849,7 +1849,7 @@ _public_ int sd_device_set_sysattr_value(sd_device *device, const char *sysattr,
if (!value)
return -ENOMEM;
@@ -255,10 +257,10 @@ index c2315c0..00f81b4 100644
if (r == -ELOOP)
return -EINVAL;
diff --git a/src/login/logind-dbus.c b/src/login/logind-dbus.c
-index 2cebcce..7111fc1 100644
+index 30b9a66334..cc1d577933 100644
--- a/src/login/logind-dbus.c
+++ b/src/login/logind-dbus.c
-@@ -1285,7 +1285,7 @@ static int trigger_device(Manager *m, sd_device *d) {
+@@ -1325,7 +1325,7 @@ static int trigger_device(Manager *m, sd_device *d) {
if (!t)
return -ENOMEM;
@@ -268,23 +270,23 @@ index 2cebcce..7111fc1 100644
return 0;
diff --git a/src/nspawn/nspawn-cgroup.c b/src/nspawn/nspawn-cgroup.c
-index 168125d..dd0ab79 100644
+index 0462b46413..7c53d41483 100644
--- a/src/nspawn/nspawn-cgroup.c
+++ b/src/nspawn/nspawn-cgroup.c
-@@ -124,7 +124,7 @@ int sync_cgroup(pid_t pid, CGroupUnified unified_requested, uid_t uid_shift) {
- (void) mkdir_parents(fn, 0755);
+@@ -123,7 +123,7 @@ int sync_cgroup(pid_t pid, CGroupUnified unified_requested, uid_t uid_shift) {
+ fn = strjoina(tree, cgroup, "/cgroup.procs");
sprintf(pid_string, PID_FMT, pid);
-- r = write_string_file(fn, pid_string, WRITE_STRING_FILE_DISABLE_BUFFER);
-+ r = write_string_file(fn, pid_string, 0);
+- r = write_string_file(fn, pid_string, WRITE_STRING_FILE_DISABLE_BUFFER|WRITE_STRING_FILE_MKDIR_0755);
++ r = write_string_file(fn, pid_string, WRITE_STRING_FILE_MKDIR_0755);
if (r < 0) {
log_error_errno(r, "Failed to move process: %m");
goto finish;
diff --git a/src/nspawn/nspawn.c b/src/nspawn/nspawn.c
-index 3b0ecb1..a1b5240 100644
+index 2aec8041f0..841542f2f3 100644
--- a/src/nspawn/nspawn.c
+++ b/src/nspawn/nspawn.c
-@@ -2341,7 +2341,7 @@ static int reset_audit_loginuid(void) {
+@@ -2357,7 +2357,7 @@ static int reset_audit_loginuid(void) {
if (streq(p, "4294967295"))
return 0;
@@ -293,7 +295,7 @@ index 3b0ecb1..a1b5240 100644
if (r < 0) {
log_error_errno(r,
"Failed to reset audit login UID. This probably means that your kernel is too\n"
-@@ -3531,13 +3531,13 @@ static int setup_uid_map(pid_t pid) {
+@@ -3566,13 +3566,13 @@ static int setup_uid_map(pid_t pid) {
xsprintf(uid_map, "/proc/" PID_FMT "/uid_map", pid);
xsprintf(line, UID_FMT " " UID_FMT " " UID_FMT "\n", 0, arg_uid_shift, arg_uid_range);
@@ -310,10 +312,10 @@ index 3b0ecb1..a1b5240 100644
return log_error_errno(r, "Failed to write GID map: %m");
diff --git a/src/shared/sysctl-util.c b/src/shared/sysctl-util.c
-index 9be4055..f935cde 100644
+index 93bdcf11bf..68cddb7a9f 100644
--- a/src/shared/sysctl-util.c
+++ b/src/shared/sysctl-util.c
-@@ -73,7 +73,7 @@ int sysctl_write_ip_property(int af, const char *ifname, const char *property, c
+@@ -88,7 +88,7 @@ int sysctl_write_ip_property(int af, const char *ifname, const char *property, c
log_debug("Setting '%s' to '%s'", p, value);
@@ -323,19 +325,19 @@ index 9be4055..f935cde 100644
int sysctl_read(const char *property, char **content) {
diff --git a/src/sleep/sleep.c b/src/sleep/sleep.c
-index 11aabaf..6aa5d37 100644
+index b9fe96635d..f168d7f890 100644
--- a/src/sleep/sleep.c
+++ b/src/sleep/sleep.c
-@@ -48,7 +48,7 @@ static int write_hibernate_location_info(void) {
+@@ -54,7 +54,7 @@ static int write_hibernate_location_info(void) {
/* if it's a swap partition, we just write the disk to /sys/power/resume */
if (streq(type, "partition")) {
- r = write_string_file("/sys/power/resume", device, WRITE_STRING_FILE_DISABLE_BUFFER);
+ r = write_string_file("/sys/power/resume", device, 0);
if (r < 0)
- return log_debug_errno(r, "Faileed to write partitoin device to /sys/power/resume: %m");
+ return log_debug_errno(r, "Failed to write partition device to /sys/power/resume: %m");
-@@ -84,12 +84,12 @@ static int write_hibernate_location_info(void) {
+@@ -98,14 +98,14 @@ static int write_hibernate_location_info(void) {
offset = fiemap->fm_extents[0].fe_physical / page_size();
xsprintf(offset_str, "%" PRIu64, offset);
@@ -344,13 +346,15 @@ index 11aabaf..6aa5d37 100644
if (r < 0)
return log_debug_errno(r, "Failed to write offset '%s': %m", offset_str);
+ log_debug("Wrote calculated resume_offset value to /sys/power/resume_offset: %s", offset_str);
+
xsprintf(device_str, "%lx", (unsigned long)stb.st_dev);
- r = write_string_file("/sys/power/resume", device_str, WRITE_STRING_FILE_DISABLE_BUFFER);
+ r = write_string_file("/sys/power/resume", device_str, 0);
if (r < 0)
return log_debug_errno(r, "Failed to write device '%s': %m", device_str);
-@@ -103,7 +103,7 @@ static int write_mode(char **modes) {
+@@ -121,7 +121,7 @@ static int write_mode(char **modes) {
STRV_FOREACH(mode, modes) {
int k;
@@ -359,7 +363,7 @@ index 11aabaf..6aa5d37 100644
if (k >= 0)
return 0;
-@@ -122,7 +122,7 @@ static int write_state(FILE **f, char **states) {
+@@ -140,7 +140,7 @@ static int write_state(FILE **f, char **states) {
STRV_FOREACH(state, states) {
int k;
@@ -368,17 +372,8 @@ index 11aabaf..6aa5d37 100644
if (k >= 0)
return 0;
log_debug_errno(k, "Failed to write '%s' to /sys/power/state: %m", *state);
-@@ -217,7 +217,7 @@ static int rtc_write_wake_alarm(uint64_t sec) {
-
- xsprintf(buf, "%" PRIu64, sec);
-
-- r = write_string_file("/sys/class/rtc/rtc0/wakealarm", buf, WRITE_STRING_FILE_DISABLE_BUFFER);
-+ r = write_string_file("/sys/class/rtc/rtc0/wakealarm", buf, 0);
- if (r < 0)
- return log_error_errno(r, "Failed to write '%s' to /sys/class/rtc/rtc0/wakealarm: %m", buf);
-
diff --git a/src/udev/udevadm-trigger.c b/src/udev/udevadm-trigger.c
-index b7dafb7..bab4907 100644
+index 77d95e513f..25ce4abfb1 100644
--- a/src/udev/udevadm-trigger.c
+++ b/src/udev/udevadm-trigger.c
@@ -43,7 +43,7 @@ static int exec_list(sd_device_enumerator *e, const char *action, Set *settle_se
@@ -388,44 +383,26 @@ index b7dafb7..bab4907 100644
- r = write_string_file(filename, action, WRITE_STRING_FILE_DISABLE_BUFFER);
+ r = write_string_file(filename, action, 0);
if (r < 0) {
- log_debug_errno(r, "Failed to write '%s' to '%s', ignoring: %m", action, filename);
- continue;
+ log_full_errno(r == -ENOENT ? LOG_DEBUG : LOG_ERR, r,
+ "Failed to write '%s' to '%s': %m", action, filename);
diff --git a/src/udev/udevd.c b/src/udev/udevd.c
-index 140ec35..33063a9 100644
+index cb5123042a..ea309a9e7f 100644
--- a/src/udev/udevd.c
+++ b/src/udev/udevd.c
-@@ -1185,7 +1185,7 @@ static int synthesize_change(sd_device *dev) {
- */
- log_debug("Device '%s' is closed, synthesising 'change'", devname);
- strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
-- write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
-+ write_string_file(filename, "change", 0);
-
- FOREACH_DEVICE(e, d) {
- const char *t, *n, *s;
-@@ -1200,7 +1200,7 @@ static int synthesize_change(sd_device *dev) {
-
- log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
- strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
-- write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
-+ write_string_file(filename, "change", 0);
- }
-
- return 0;
-@@ -1208,7 +1208,7 @@ static int synthesize_change(sd_device *dev) {
-
- log_debug("Device %s is closed, synthesising 'change'", devname);
- strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
-- write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
-+ write_string_file(filename, "change", 0);
+@@ -1113,7 +1113,7 @@ static int synthesize_change_one(sd_device *dev, const char *syspath) {
+ filename = strjoina(syspath, "/uevent");
+ log_device_debug(dev, "device is closed, synthesising 'change' on %s", syspath);
+- r = write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
++ r = write_string_file(filename, "change", 0);
+ if (r < 0)
+ return log_device_debug_errno(dev, r, "Failed to write 'change' to %s: %m", filename);
return 0;
- }
diff --git a/src/vconsole/vconsole-setup.c b/src/vconsole/vconsole-setup.c
-index 67dc2e4..01b83d0 100644
+index 75d052ae70..5a15c939d8 100644
--- a/src/vconsole/vconsole-setup.c
+++ b/src/vconsole/vconsole-setup.c
-@@ -116,7 +116,7 @@ static int toggle_utf8(const char *name, int fd, bool utf8) {
+@@ -117,7 +117,7 @@ static int toggle_utf8_vc(const char *name, int fd, bool utf8) {
static int toggle_utf8_sysfs(bool utf8) {
int r;
@@ -434,6 +411,3 @@ index 67dc2e4..01b83d0 100644
if (r < 0)
return log_warning_errno(r, "Failed to %s sysfs UTF-8 flag: %m", enable_disable(utf8));
---
-2.11.0
-
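
Every hunk in this patch swaps WRITE_STRING_FILE_DISABLE_BUFFER for 0 in calls that write one short value into a /proc or /sys attribute file. As a rough illustration of what such a one-shot pseudo-file write amounts to (this is not systemd's write_string_file(); it is a plain POSIX sketch with a hypothetical helper name):

    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Illustrative only: write a short string to a kernel attribute file in a
     * single write(2) call, with no stdio buffering involved at all. */
    static int write_sysfs_value(const char *path, const char *value)
    {
        int fd = open(path, O_WRONLY | O_CLOEXEC);
        if (fd < 0)
            return -errno;

        ssize_t n = write(fd, value, strlen(value));
        int saved = (n < 0) ? errno : 0;
        close(fd);

        return (n < 0) ? -saved : 0;
    }
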
diff --git a/meta/recipes-core/systemd/systemd/0001-src-udev-udev-event.c-must-include-sys-wait.h.patch b/meta/recipes-core/systemd/systemd/0001-src-udev-udev-event.c-must-include-sys-wait.h.patch
deleted file mode 100644
index 565deba1d0..0000000000
--- a/meta/recipes-core/systemd/systemd/0001-src-udev-udev-event.c-must-include-sys-wait.h.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 010f917cf222d2c7205584056fe0e4c581a2e1b1 Mon Sep 17 00:00:00 2001
-From: Adrian Bunk <bunk@stusta.de>
-Date: Thu, 16 May 2019 22:09:46 +0300
-Subject: src/udev/udev-event.c must #include <sys/wait.h>
-
-Fixes the following build failure with musl:
-../git/src/udev/udev-event.c: In function 'spawn_wait':
-../git/src/udev/udev-event.c:600:53: error: 'WEXITED' undeclared (first use in this function); did you mean 'WIFEXITED'?
- r = sd_event_add_child(e, NULL, spawn->pid, WEXITED, on_spawn_sigchld, spawn);
- ^~~~~~~
-
-This looks like a bug in udev-event.c that could also have broken
-the compilation after some future glibc header reshuffle.
-
-Upstream-Status: Backport
-Signed-off-by: Adrian Bunk <bunk@stusta.de>
----
- src/udev/udev-event.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/udev/udev-event.c b/src/udev/udev-event.c
-index cab1b5ac0c..a0a7ac0f9e 100644
---- a/src/udev/udev-event.c
-+++ b/src/udev/udev-event.c
-@@ -7,6 +7,7 @@
- #include <stddef.h>
- #include <stdio.h>
- #include <stdlib.h>
-+#include <sys/wait.h>
- #include <unistd.h>
-
- #include "sd-event.h"
---
-2.20.1
-
diff --git a/meta/recipes-core/systemd/systemd/0001-unit-file.c-consider-symlink-on-filesystems-like-NFS.patch b/meta/recipes-core/systemd/systemd/0001-unit-file.c-consider-symlink-on-filesystems-like-NFS.patch
new file mode 100644
index 0000000000..ba20a0bb46
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-unit-file.c-consider-symlink-on-filesystems-like-NFS.patch
@@ -0,0 +1,42 @@
+From d0122c077d2d8fd0fd29b463c501e7ddf9177ff3 Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Tue, 24 Sep 2019 17:04:50 +0800
+Subject: [PATCH] unit-file.c: consider symlink on filesystems like NFS
+
+Some filesystems do not fully support readdir, according to the manual,
+so we should also consider DT_UNKNOWN to correctly handle symlinks.
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+
+Upstream-Status: Submitted [https://github.com/systemd/systemd/pull/13637]
+---
+ src/shared/unit-file.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/src/shared/unit-file.c b/src/shared/unit-file.c
+index 4a5f23e6c1..8373103000 100644
+--- a/src/shared/unit-file.c
++++ b/src/shared/unit-file.c
+@@ -247,6 +247,7 @@ int unit_file_build_name_map(
+ _cleanup_free_ char *_filename_free = NULL, *simplified = NULL;
+ const char *suffix, *dst = NULL;
+ bool valid_unit_name;
++ struct stat sb;
+
+ valid_unit_name = unit_name_is_valid(de->d_name, UNIT_NAME_ANY);
+
+@@ -279,7 +280,10 @@ int unit_file_build_name_map(
+ if (hashmap_contains(ids, de->d_name))
+ continue;
+
+- if (de->d_type == DT_LNK) {
++ if (de->d_type == DT_LNK ||
++ (de->d_type == DT_UNKNOWN &&
++ lstat(filename, &sb) == 0 &&
++ (sb.st_mode & S_IFMT) == S_IFLNK)) {
+ /* We don't explicitly check for alias loops here. unit_ids_map_get() which
+ * limits the number of hops should be used to access the map. */
+
+--
+2.17.1
+
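
The change above falls back to lstat() when readdir() reports DT_UNKNOWN. A standalone sketch of that pattern, using a hypothetical entry_is_symlink() helper rather than systemd's actual unit_file_build_name_map() code:

    #include <dirent.h>
    #include <stdio.h>
    #include <sys/stat.h>

    /* Treat an entry as a symlink if readdir() reports DT_LNK, or if it
     * reports DT_UNKNOWN (as some filesystems such as NFS may) and lstat()
     * confirms the file mode is S_IFLNK. */
    static int entry_is_symlink(const char *dirpath, const struct dirent *de)
    {
        char path[4096];
        struct stat sb;

        if (de->d_type == DT_LNK)
            return 1;

        if (de->d_type != DT_UNKNOWN)
            return 0;

        snprintf(path, sizeof(path), "%s/%s", dirpath, de->d_name);
        return lstat(path, &sb) == 0 && S_ISLNK(sb.st_mode);
    }
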
diff --git a/meta/recipes-core/systemd/systemd/0002-src-login-brightness.c-include-sys-wait.h.patch b/meta/recipes-core/systemd/systemd/0002-src-login-brightness.c-include-sys-wait.h.patch
new file mode 100644
index 0000000000..dcae668dcb
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0002-src-login-brightness.c-include-sys-wait.h.patch
@@ -0,0 +1,25 @@
+Include sys/wait.h
+
+Fixes:
+src/login/logind-brightness.c:158:85: error: 'WEXITED' undeclared (first use in this function); did you mean 'WIFEXITED'?
+ 158 | r = sd_event_add_child(w->manager->event, &w->child_event_source, w->child, WEXITED, on_brightness_writer_exit, w);
+ | ^~~~~~~
+
+Upstream-Status: Pending
+
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+---
+ src/login/logind-brightness.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/login/logind-brightness.c b/src/login/logind-brightness.c
+index 8dfa97d7ae..bddd4a2727 100644
+--- a/src/login/logind-brightness.c
++++ b/src/login/logind-brightness.c
+@@ -1,5 +1,6 @@
+ /* SPDX-License-Identifier: LGPL-2.1+ */
+
++#include <sys/wait.h>
+ #include "bus-util.h"
+ #include "device-util.h"
+ #include "hash-funcs.h"
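
WEXITED is declared in <sys/wait.h> as a flag for waitid(); glibc presumably pulls it in transitively through another header, while musl does not, hence the error quoted above. A minimal illustration of the standard usage that requires the include (not systemd's sd_event_add_child() path):

    #include <sys/types.h>
    #include <sys/wait.h>

    /* Wait for 'pid' to exit; WEXITED and waitid() both come from <sys/wait.h>. */
    static int wait_for_exit(pid_t pid, siginfo_t *info)
    {
        return waitid(P_PID, pid, info, WEXITED);
    }
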
diff --git a/meta/recipes-core/systemd/systemd/0002-use-lnr-wrapper-instead-of-looking-for-relative-opti.patch b/meta/recipes-core/systemd/systemd/0002-use-lnr-wrapper-instead-of-looking-for-relative-opti.patch
index df5506cc2b..49a334d090 100644
--- a/meta/recipes-core/systemd/systemd/0002-use-lnr-wrapper-instead-of-looking-for-relative-opti.patch
+++ b/meta/recipes-core/systemd/systemd/0002-use-lnr-wrapper-instead-of-looking-for-relative-opti.patch
@@ -11,6 +11,8 @@ Upstream-Status: Inappropriate [OE-Specific]
Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+[rebased for systemd 243]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
---
meson.build | 4 ----
tools/meson-make-symlink.sh | 3 ++-
@@ -18,10 +20,10 @@ Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/meson.build b/meson.build
-index 56c98b9..3386546 100644
+index e5ceb1e169..79b762faeb 100644
--- a/meson.build
+++ b/meson.build
-@@ -630,10 +630,6 @@ endforeach
+@@ -579,10 +579,6 @@ endforeach
conf.set_quoted('TELINIT', get_option('telinit-path'))
@@ -33,23 +35,23 @@ index 56c98b9..3386546 100644
gperf = find_program('gperf')
diff --git a/tools/meson-make-symlink.sh b/tools/meson-make-symlink.sh
-index 501cd43..f4e4ac9 100755
+index da0d13a341..90bc0a93c2 100755
--- a/tools/meson-make-symlink.sh
+++ b/tools/meson-make-symlink.sh
@@ -8,5 +8,6 @@ mkdir -vp "$(dirname "${DESTDIR:-}$2")"
if [ "$(dirname $1)" = . ]; then
- ln -vfs -T "$1" "${DESTDIR:-}$2"
+ ln -vfs -T "$1" "${DESTDIR:-}$2"
else
-- ln -vfs -T --relative "${DESTDIR:-}$1" "${DESTDIR:-}$2"
-+ rm -f "${DESTDIR:-}$2"
-+ lnr "${DESTDIR:-}$1" "${DESTDIR:-}$2"
+- ln -vfs -T --relative "${DESTDIR:-}$1" "${DESTDIR:-}$2"
++ rm -f "${DESTDIR:-}$2"
++ lnr "${DESTDIR:-}$1" "${DESTDIR:-}$2"
fi
diff --git a/units/meson-add-wants.sh b/units/meson-add-wants.sh
-index e2b2603..210d604 100755
+index a483d75b86..3c01c523f1 100755
--- a/units/meson-add-wants.sh
+++ b/units/meson-add-wants.sh
@@ -25,4 +25,9 @@ case "$target" in
- ;;
+ ;;
esac
-ln -vfs --relative "$unitpath" "$dir"
@@ -59,6 +61,3 @@ index e2b2603..210d604 100755
+else
+ lnr "$unitpath" "$dir"
+fi
---
-2.7.4
-
diff --git a/meta/recipes-core/systemd/systemd/0003-src-basic-copy.c-include-signal.h.patch b/meta/recipes-core/systemd/systemd/0003-src-basic-copy.c-include-signal.h.patch
new file mode 100644
index 0000000000..7ee0d48fa6
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0003-src-basic-copy.c-include-signal.h.patch
@@ -0,0 +1,27 @@
+Include signal.h
+
+Fixes several signal set related errors:
+src/basic/copy.c:92:19: error: implicit declaration of function 'sigemptyset' [-Werror=implicit-function-declaration]
+src/basic/copy.c:93:19: error: implicit declaration of function 'sigaddset' [-Werror=implicit-function-declaration]
+src/basic/copy.c:93:34: error: 'SIGINT' undeclared (first use in this function)
+src/basic/copy.c:95:13: error: implicit declaration of function 'sigtimedwait' [-Werror=implicit-function-declaration]
+
+Upstream-Status: Pending
+
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+---
+ src/basic/copy.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/basic/copy.c b/src/basic/copy.c
+index ca311e021e..3cf7fc1697 100644
+--- a/src/basic/copy.c
++++ b/src/basic/copy.c
+@@ -12,6 +12,7 @@
+ #include <sys/xattr.h>
+ #include <time.h>
+ #include <unistd.h>
++#include <signal.h>
+
+ #include "alloc-util.h"
+ #include "btrfs-util.h"
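
sigemptyset(), sigaddset(), and sigtimedwait() are all declared in <signal.h>, which is why the build fails under musl without the include. A small, generic sketch of the signal-set wait these calls implement (illustrative only, not the copy.c code path):

    #include <signal.h>
    #include <time.h>

    /* Wait up to 'sec' seconds for SIGINT or SIGTERM.  In real code the set
     * must first be blocked (e.g. with sigprocmask()) for sigtimedwait() to
     * receive the signals reliably. */
    static int wait_for_signal(int sec)
    {
        sigset_t set;
        struct timespec timeout = { .tv_sec = sec, .tv_nsec = 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigaddset(&set, SIGTERM);

        return sigtimedwait(&set, NULL, &timeout);  /* signal number, or -1 */
    }
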
diff --git a/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch b/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch
index fa8217e3da..a2aad40ac2 100644
--- a/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch
+++ b/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch
@@ -8,6 +8,8 @@ Upstream-Status: Inappropriate [musl specific]
Signed-off-by: Emil Renner Berthing <systemd@esmil.dk>
Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+[rebased for systemd 243]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
---
meson.build | 1 +
src/basic/meson.build | 5 +
@@ -20,22 +22,22 @@ Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
create mode 100644 src/basic/parse-printf-format.h
diff --git a/meson.build b/meson.build
-index 79195c9748..0bffbf29c5 100644
+index 79b762faeb..7f8c679411 100644
--- a/meson.build
+++ b/meson.build
-@@ -685,6 +685,7 @@ foreach header : ['crypt.h',
+@@ -613,6 +613,7 @@ endif
+ foreach header : ['crypt.h',
'linux/memfd.h',
'linux/vm_sockets.h',
- 'linux/can/vxcan.h',
+ 'printf.h',
'sys/auxv.h',
'valgrind/memcheck.h',
'valgrind/valgrind.h',
diff --git a/src/basic/meson.build b/src/basic/meson.build
-index 91e0df3d2f..a732b554da 100644
+index d6caf28f14..32c1acf349 100644
--- a/src/basic/meson.build
+++ b/src/basic/meson.build
-@@ -298,6 +298,11 @@ foreach item : [['af', af_list_txt, 'af', ''],
+@@ -312,6 +312,11 @@ foreach item : [['af', af_list_txt, 'af', ''],
endforeach
basic_sources += generated_gperf_headers
@@ -428,6 +430,3 @@ index 5ef11fa1a4..6384ab620c 100644
#define SNDBUF_SIZE (8*1024*1024)
---
-2.11.0
-
diff --git a/meta/recipes-core/systemd/systemd/0004-rules-whitelist-hd-devices.patch b/meta/recipes-core/systemd/systemd/0004-rules-whitelist-hd-devices.patch
index 738f8eb7b6..f9c5996ffb 100644
--- a/meta/recipes-core/systemd/systemd/0004-rules-whitelist-hd-devices.patch
+++ b/meta/recipes-core/systemd/systemd/0004-rules-whitelist-hd-devices.patch
@@ -13,23 +13,22 @@ Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
Signed-off-by: Khem Raj <raj.khem@gmail.com>
[rebased for systemd 241]
Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+[rebased for systemd 243]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
---
rules/60-persistent-storage.rules | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/rules/60-persistent-storage.rules b/rules/60-persistent-storage.rules
-index 1d8880e..e53c8ea 100644
+index 7802b1c94f..c0534ae26a 100644
--- a/rules/60-persistent-storage.rules
+++ b/rules/60-persistent-storage.rules
@@ -7,7 +7,7 @@ ACTION=="remove", GOTO="persistent_storage_end"
ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_end"
SUBSYSTEM!="block", GOTO="persistent_storage_end"
--KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*|scm*|pmem*|nbd*|zd*", GOTO="persistent_storage_end"
-+KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*|scm*|pmem*|nbd*|zd*|hd*", GOTO="persistent_storage_end"
+-KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*|ubi*|scm*|pmem*|nbd*|zd*", GOTO="persistent_storage_end"
++KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*|ubi*|scm*|pmem*|nbd*|zd*|hd*", GOTO="persistent_storage_end"
# ignore partitions that span the entire disk
TEST=="whole_disk", GOTO="persistent_storage_end"
---
-2.7.4
-
diff --git a/meta/recipes-core/systemd/systemd/0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch b/meta/recipes-core/systemd/systemd/0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch
new file mode 100644
index 0000000000..0f75e8c12d
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch
@@ -0,0 +1,54 @@
+Handle __cpu_mask usage
+
+Fixes errors:
+
+src/test/test-cpu-set-util.c:18:54: error: '__cpu_mask' undeclared (first use in this function)
+src/test/test-sizeof.c:73:14: error: '__cpu_mask' undeclared (first use in this function)
+
+__cpu_mask is an internal type of glibc's cpu_set implementation, not
+part of the POSIX definition, which is problematic when building with
+musl, which does not define a matching type. From inspection of musl's
+sched.h, however, it is clear that the corresponding type would be
+unsigned long, which does match glibc's actual __CPU_MASK_TYPE. So,
+add a typedef to cpu-set-util.h defining __cpu_mask appropriately.
+
+Upstream-Status: Inappropriate [musl specific]
+
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+---
+ src/shared/cpu-set-util.h | 2 ++
+ src/test/test-sizeof.c | 2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/shared/cpu-set-util.h b/src/shared/cpu-set-util.h
+index 27812dfd59..f698f9df83 100644
+--- a/src/shared/cpu-set-util.h
++++ b/src/shared/cpu-set-util.h
+@@ -6,6 +6,8 @@
+ #include "macro.h"
+ #include "missing_syscall.h"
+
++typedef unsigned long __cpu_mask;
++
+ /* This wraps the libc interface with a variable to keep the allocated size. */
+ typedef struct CPUSet {
+ cpu_set_t *set;
+diff --git a/src/test/test-sizeof.c b/src/test/test-sizeof.c
+index a710db5370..d1601ad929 100644
+--- a/src/test/test-sizeof.c
++++ b/src/test/test-sizeof.c
+@@ -1,6 +1,5 @@
+ /* SPDX-License-Identifier: LGPL-2.1+ */
+
+-#include <sched.h>
+ #include <stdio.h>
+ #include <string.h>
+