Diffstat (limited to 'classes')
-rw-r--r--  classes/amend.bbclass                |  38
-rw-r--r--  classes/autotools.bbclass            |  38
-rw-r--r--  classes/autotools_stage.bbclass      |   5
-rw-r--r--  classes/base.bbclass                 | 222
-rw-r--r--  classes/cmake.bbclass                |   3
-rw-r--r--  classes/cpan-base.bbclass            |   2
-rw-r--r--  classes/cpan.bbclass                 |  15
-rw-r--r--  classes/cpan_build.bbclass           |   4
-rw-r--r--  classes/cross.bbclass                |  22
-rw-r--r--  classes/crosssdk.bbclass             |   2
-rw-r--r--  classes/debian.bbclass               |   2
-rw-r--r--  classes/distribute_license.bbclass   |  51
-rw-r--r--  classes/dsmg600-image.bbclass        |   2
-rw-r--r--  classes/gettext.bbclass              |   4
-rw-r--r--  classes/gitpkgv.bbclass              |  41
-rw-r--r--  classes/gitver.bbclass               |   9
-rw-r--r--  classes/glibc-package.bbclass        |   8
-rw-r--r--  classes/icecc.bbclass                |  19
-rw-r--r--  classes/image.bbclass                |   2
-rw-r--r--  classes/insane.bbclass               |  81
-rw-r--r--  classes/java-library.bbclass         |   1
-rw-r--r--  classes/java-native.bbclass          |   4
-rw-r--r--  classes/kernel-arch.bbclass          |  11
-rw-r--r--  classes/kernel.bbclass               |   5
-rw-r--r--  classes/klibc.bbclass                |   9
-rw-r--r--  classes/magicbox-image.bbclass       |   6
-rw-r--r--  classes/module-base.bbclass          |   7
-rw-r--r--  classes/module_strip.bbclass         |   3
-rw-r--r--  classes/mozilla.bbclass              |   2
-rw-r--r--  classes/nas100d-image.bbclass        |   2
-rw-r--r--  classes/native.bbclass               |   7
-rw-r--r--  classes/nativesdk.bbclass            |   7
-rw-r--r--  classes/openmoko-base.bbclass        |   2
-rw-r--r--  classes/openmoko2.bbclass            |   2
-rw-r--r--  classes/package.bbclass              |  34
-rw-r--r--  classes/package_ipk.bbclass          |   6
-rw-r--r--  classes/packaged-staging.bbclass     |  93
-rw-r--r--  classes/patch.bbclass                |  66
-rw-r--r--  classes/pkgconfig.bbclass            |   9
-rw-r--r--  classes/python-dir.bbclass           |   2
-rw-r--r--  classes/qmake_base.bbclass           |   2
-rw-r--r--  classes/recipe_sanity.bbclass        |   2
-rw-r--r--  classes/relocatable.bbclass          |  21
-rw-r--r--  classes/rootfs_ipk.bbclass           |  16
-rw-r--r--  classes/sanity.bbclass               |  13
-rw-r--r--  classes/shr-mirrors.bbclass          |  13
-rw-r--r--  classes/siteinfo.bbclass             | 190
-rw-r--r--  classes/sourceipk.bbclass            | 131
-rw-r--r--  classes/srctree.bbclass              |  31
-rw-r--r--  classes/staging.bbclass              |   3
-rw-r--r--  classes/testlab.bbclass              |  10
-rw-r--r--  classes/utils.bbclass                |  47
-rw-r--r--  classes/xfce46.bbclass               |   2
-rw-r--r--  classes/xilinx-bsp.bbclass           | 119
54 files changed, 893 insertions(+), 555 deletions(-)
diff --git a/classes/amend.bbclass b/classes/amend.bbclass
index bcb93d4e75..2d928286b3 100644
--- a/classes/amend.bbclass
+++ b/classes/amend.bbclass
@@ -14,16 +14,32 @@ python () {
    amendfiles = [os.path.join(fpath, "amend.inc")
                  for fpath in filespath]

-    # Adding all amend.incs that can exist to the __depends, to ensure that
-    # creating one of them invalidates the bitbake cache. Note that it
-    # requires a fix in bitbake. Without the bitbake fix, the cache will be
-    # completely invalidated on every bitbake execution.
-    depends = d.getVar("__depends", 0) or []
-    d.setVar("__depends", depends + [(file, 0) for file in amendfiles if not os.path.exists(file)])
+    newdata = []
+    seen = set()
+    for file in amendfiles:
+        if file in seen:
+            continue
+        seen.add(file)

-    existing = (file for file in amendfiles if os.path.exists(file))
-    try:
-        bb.parse.handle(existing.next(), d, 1)
-    except StopIteration:
-        pass
+        if os.path.exists(file):
+            bb.parse.handle(file, d, 1)
+        else:
+            # Manually add amend.inc files that don't exist to the __depends, to
+            # ensure that creating them invalidates the bitbake cache for that recipe.
+            newdata.append((file, 0))
+
+    if not newdata:
+        return
+
+    depends = d.getVar("__depends", False)
+    bbversion = tuple(int(i) for i in bb.__version__.split("."))
+    if bbversion < (1, 11, 0):
+        if depends is None:
+            depends = []
+        depends += newdata
+    else:
+        if depends is None:
+            depends = set()
+        depends |= set(newdata)
+    d.setVar("__depends", depends)
}
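
For context on the amend.bbclass rework above: an amend.inc is picked up from
any FILESPATH directory of a recipe, letting an overlay tweak a recipe without
copying it. A minimal sketch of such a file (layer path and variable tweak are
hypothetical):

    # <my-layer>/recipes/foo/amend.inc -- parsed into every matching recipe
    EXTRA_OECONF += "--disable-docs"

The version check matters because, as the new code reflects, bitbake 1.11
switched __depends from a list of (file, mtime) tuples to a set, so using the
wrong container operation would break cache invalidation.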
diff --git a/classes/autotools.bbclass b/classes/autotools.bbclass
index 9bb4f6c43e..b2de2b13f7 100644
--- a/classes/autotools.bbclass
+++ b/classes/autotools.bbclass
@@ -1,7 +1,7 @@
# use autotools_stage_all for native packages
AUTOTOOLS_NATIVE_STAGE_INSTALL = "1"
-def autotools_dep_prepend(d):
+def autotools_deps(d):
    if bb.data.getVar('INHIBIT_AUTOTOOLS_DEPS', d, 1):
        return ''
@@ -24,15 +24,30 @@ def autotools_dep_prepend(d):
EXTRA_OEMAKE = ""
-DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
-DEPENDS_virtclass-native_prepend = "${@autotools_dep_prepend(d)}"
-DEPENDS_virtclass-nativesdk_prepend = "${@autotools_dep_prepend(d)}"
+DEPENDS_prepend = "${@autotools_deps(d)}"
+DEPENDS_virtclass-native_prepend = "${@autotools_deps(d)}"
+DEPENDS_virtclass-nativesdk_prepend = "${@autotools_deps(d)}"
inherit siteinfo
+def _autotools_get_sitefiles(d):
+    def inherits(d, *classes):
+        if any(bb.data.inherits_class(cls, d) for cls in classes):
+            return True
+
+    if inherits(d, "native", "nativesdk"):
+        return
+
+    sitedata = siteinfo_data(d)
+    for path in d.getVar("BBPATH", True).split(":"):
+        for element in sitedata:
+            filename = os.path.join(path, "site", element)
+            if os.path.exists(filename):
+                yield filename
+
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+export CONFIG_SITE = "${@' '.join(_autotools_get_sitefiles(d))}"
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
@@ -64,12 +79,7 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
oe_runconf () {
    if [ -x ${S}/configure ] ; then
-        cfgcmd="${S}/configure \
-            ${CONFIGUREOPTS} \
-            ${EXTRA_OECONF} \
-            $@"
-        oenote "Running $cfgcmd..."
-        $cfgcmd || oefatal "oe_runconf failed"
+        ${S}/configure ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
    else
        oefatal "no configure script found"
    fi
@@ -130,7 +140,11 @@ autotools_do_configure() {
            echo "no" | glib-gettextize --force --copy
        fi
    else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
-        cp ${STAGING_DATADIR}/gettext/config.rpath ${S}/
+        if [ -e ${STAGING_DATADIR}/gettext/config.rpath ]; then
+            cp ${STAGING_DATADIR}/gettext/config.rpath ${S}/
+        else
+            oenote ${STAGING_DATADIR}/gettext/config.rpath not found. gettext is not installed.
+        fi
    fi
    fi
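
For reference, the CONFIG_SITE generator above yields at most one path per
siteinfo element per BBPATH entry, and nothing at all for native/nativesdk
recipes. With a hypothetical BBPATH of /oe/org.openembedded.dev and siteinfo
data of "endian-little" and "common-linux", configure would source:

    /oe/org.openembedded.dev/site/endian-little
    /oe/org.openembedded.dev/site/common-linux

(only the files that actually exist are emitted).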
diff --git a/classes/autotools_stage.bbclass b/classes/autotools_stage.bbclass
deleted file mode 100644
index ff0f4cd880..0000000000
--- a/classes/autotools_stage.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-inherit autotools
-
-do_stage () {
- autotools_stage_all
-}
diff --git a/classes/base.bbclass b/classes/base.bbclass
index 3c854c6e7b..299e875191 100644
--- a/classes/base.bbclass
+++ b/classes/base.bbclass
@@ -9,31 +9,27 @@ inherit utils
inherit utility-tasks
inherit metadata_scm
-python sys_path_eh () {
-    if isinstance(e, bb.event.ConfigParsed):
-        import sys
-        import os
-        import time
+OE_IMPORTS += "oe.path oe.utils sys os time"
+
+python oe_import () {
+    if isinstance(e, bb.event.ConfigParsed):
+        import os, sys
        bbpath = e.data.getVar("BBPATH", True).split(":")
        sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]

        def inject(name, value):
-            """Make a python object accessible from everywhere for the metadata"""
+            """Make a python object accessible from the metadata"""
            if hasattr(bb.utils, "_context"):
                bb.utils._context[name] = value
            else:
                __builtins__[name] = value

-        import oe.path
-        import oe.utils
-        inject("bb", bb)
-        inject("sys", sys)
-        inject("time", time)
-        inject("oe", oe)
+        for toimport in e.data.getVar("OE_IMPORTS", True).split():
+            imported = __import__(toimport)
+            inject(toimport.split(".", 1)[0], imported)
}

-addhandler sys_path_eh
+addhandler oe_import
die() {
oefatal "$*"
@@ -57,7 +53,7 @@ oe_runmake() {
${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
}
-def base_dep_prepend(d):
+def base_deps(d):
    #
    # Ideally this will check a flag so we will operate properly in
    # the case where host == build == target, for now we don't work in
@@ -75,11 +71,17 @@ def base_dep_prepend(d):
    if (bb.data.getVar('HOST_SYS', d, 1) !=
        bb.data.getVar('BUILD_SYS', d, 1)):
        deps += " virtual/${TARGET_PREFIX}gcc virtual/libc "
+    elif bb.data.inherits_class('native', d) and \
+         bb.data.getVar('PN', d, True) not in \
+         ("linux-libc-headers-native", "quilt-native",
+          "unifdef-native", "shasum-native",
+          "stagemanager-native", "coreutils-native"):
+        deps += " linux-libc-headers-native"
    return deps
-DEPENDS_prepend="${@base_dep_prepend(d)} "
-DEPENDS_virtclass-native_prepend="${@base_dep_prepend(d)} "
-DEPENDS_virtclass-nativesdk_prepend="${@base_dep_prepend(d)} "
+DEPENDS_prepend="${@base_deps(d)} "
+DEPENDS_virtclass-native_prepend="${@base_deps(d)} "
+DEPENDS_virtclass-nativesdk_prepend="${@base_deps(d)} "
SCENEFUNCS += "base_scenefunction"
@@ -161,108 +163,68 @@ python base_do_fetch() {
        raise bb.build.FuncFailed("Checksum of '%s' failed" % uri)
}

-def oe_unpack_file(file, data, url = None):
-    import subprocess
-    if not url:
-        url = "file://%s" % file
-    dots = file.split(".")
-    if dots[-1] in ['gz', 'bz2', 'Z']:
-        efile = os.path.join(bb.data.getVar('WORKDIR', data, 1),os.path.basename('.'.join(dots[0:-1])))
-    else:
-        efile = file
-    cmd = None
-    if file.endswith('.tar'):
-        cmd = 'tar x --no-same-owner -f %s' % file
-    elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
-        cmd = 'tar xz --no-same-owner -f %s' % file
-    elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
-        cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
-    elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
-        cmd = 'gzip -dc %s > %s' % (file, efile)
-    elif file.endswith('.bz2'):
-        cmd = 'bzip2 -dc %s > %s' % (file, efile)
-    elif file.endswith('.tar.xz'):
-        cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
-    elif file.endswith('.xz'):
-        cmd = 'xz -dc %s > %s' % (file, efile)
-    elif file.endswith('.zip') or file.endswith('.jar'):
-        cmd = 'unzip -q -o'
-        (type, host, path, user, pswd, parm) = bb.decodeurl(url)
-        if 'dos' in parm:
-            cmd = '%s -a' % cmd
-        cmd = "%s '%s'" % (cmd, file)
-    elif os.path.isdir(file):
-        destdir = "."
-        filespath = bb.data.getVar("FILESPATH", data, 1).split(":")
-        for fp in filespath:
-            if file[0:len(fp)] == fp:
-                destdir = file[len(fp):file.rfind('/')]
-                destdir = destdir.strip('/')
-                if len(destdir) < 1:
-                    destdir = "."
-                elif not os.access("%s/%s" % (os.getcwd(), destdir), os.F_OK):
-                    os.makedirs("%s/%s" % (os.getcwd(), destdir))
-                break
-
-        cmd = 'cp -pPR %s %s/%s/' % (file, os.getcwd(), destdir)
-    else:
-        (type, host, path, user, pswd, parm) = bb.decodeurl(url)
-        if not 'patch' in parm:
-            # The "destdir" handling was specifically done for FILESPATH
-            # items. So, only do so for file:// entries.
-            if type == "file":
-                destdir = bb.decodeurl(url)[1] or "."
-            else:
-                destdir = "."
-            bb.mkdirhier("%s/%s" % (os.getcwd(), destdir))
-            cmd = 'cp %s %s/%s/' % (file, os.getcwd(), destdir)
-
-    if not cmd:
-        return True
-
-    dest = os.path.join(os.getcwd(), os.path.basename(file))
-    if os.path.exists(dest):
-        if os.path.samefile(file, dest):
-            return True
-
-    # Change to subdir before executing command
-    save_cwd = os.getcwd();
-    parm = bb.decodeurl(url)[5]
-    if 'subdir' in parm:
-        newdir = ("%s/%s" % (os.getcwd(), parm['subdir']))
-        bb.mkdirhier(newdir)
-        os.chdir(newdir)
-
-    cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', data, 1), cmd)
-    bb.note("Unpacking %s to %s/" % (base_path_out(file, data), base_path_out(os.getcwd(), data)))
-    ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)
-
-    os.chdir(save_cwd)
-
-    return ret == 0
+def oe_unpack(d, local, urldata):
+    from oe.unpack import unpack_file, is_patch, UnpackError
+    if is_patch(local, urldata.parm):
+        return
+
+    subdirs = []
+    if "subdir" in urldata.parm:
+        subdirs.append(urldata.parm["subdir"])
+
+    if urldata.type == "file":
+        if not urldata.host:
+            urlpath = urldata.path
+        else:
+            urlpath = oe.path.join(urldata.host, urldata.path)
+
+        if not os.path.isabs(urlpath):
+            subdirs.append(os.path.dirname(urlpath))
+
+    workdir = d.getVar("WORKDIR", True)
+    if subdirs:
+        destdir = oe.path.join(workdir, *subdirs)
+        bb.mkdirhier(destdir)
+    else:
+        destdir = workdir
+    dos = urldata.parm.get("dos")
+
+    bb.note("Unpacking %s to %s/" % (base_path_out(local, d),
+                                     base_path_out(destdir, d)))
+    try:
+        unpack_file(local, destdir, env={"PATH": d.getVar("PATH", True)}, dos=dos)
+    except UnpackError, exc:
+        bb.fatal(str(exc))
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
python base_do_unpack() {
-    import re
+    from glob import glob
+
+    srcurldata = bb.fetch.init(d.getVar("SRC_URI", True).split(), d, True)
+    filespath = d.getVar("FILESPATH", True).split(":")
+
+    for url, urldata in srcurldata.iteritems():
+        if urldata.type == "file" and "*" in urldata.path:
+            # The fetch code doesn't know how to handle globs, so
+            # we need to handle the local bits ourselves
+            for path in filespath:
+                srcdir = oe.path.join(path, urldata.host,
+                                      os.path.dirname(urldata.path))
+                if os.path.exists(srcdir):
+                    break
+            else:
+                bb.fatal("Unable to locate files for %s" % url)

-    localdata = bb.data.createCopy(d)
-    bb.data.update_data(localdata)
+            for filename in glob(oe.path.join(srcdir,
+                                              os.path.basename(urldata.path))):
+                oe_unpack(d, filename, urldata)
+        else:
+            local = urldata.localpath
+            if not local:
+                raise bb.build.FuncFailed('Unable to locate local file for %s' % url)

-    src_uri = bb.data.getVar('SRC_URI', localdata, True)
-    if not src_uri:
-        return
-    for url in src_uri.split():
-        try:
-            local = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
-        except bb.MalformedUrl, e:
-            raise bb.build.FuncFailed('Unable to generate local path for malformed uri: %s' % e)
-        if not local:
-            raise bb.build.FuncFailed('Unable to locate local file for %s' % url)
-        local = os.path.realpath(local)
-        ret = oe_unpack_file(local, localdata, url)
-        if not ret:
-            raise bb.build.FuncFailed()
+            oe_unpack(d, local, urldata)
}
addhandler base_eventhandler
@@ -333,7 +295,7 @@ base_do_configure() {
addtask compile after do_configure
do_compile[dirs] = "${S} ${B}"
base_do_compile() {
-    if [ -e Makefile -o -e makefile ]; then
+    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
        oe_runmake || die "make failed"
    else
        oenote "nothing to compile"
@@ -374,7 +336,16 @@ python () {
        import re
        this_machine = bb.data.getVar('MACHINE', d, 1)
        if this_machine and not re.match(need_machine, this_machine):
-            raise bb.parse.SkipPackage("incompatible with machine %s" % this_machine)
+            this_soc_family = bb.data.getVar('SOC_FAMILY', d, 1)
+            if this_soc_family and not re.match(need_machine, this_soc_family):
+                raise bb.parse.SkipPackage("incompatible with machine %s" % this_machine)
+
+    need_target = bb.data.getVar('COMPATIBLE_TARGET_SYS', d, 1)
+    if need_target:
+        import re
+        this_target = bb.data.getVar('TARGET_SYS', d, 1)
+        if this_target and not re.match(need_target, this_target):
+            raise bb.parse.SkipPackage("incompatible with target system %s" % this_target)

    pn = bb.data.getVar('PN', d, 1)
@@ -418,23 +389,10 @@ python () {
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = bb.data.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', d, 1)
-    if override != '0':
-        paths = []
-        for p in [ "${PF}", "${P}", "${PN}", "files", "" ]:
-            path = bb.data.expand(os.path.join("${FILE_DIRNAME}", p, "${MACHINE}"), d)
-            if os.path.isdir(path):
-                paths.append(path)
-        if len(paths) != 0:
-            for s in srcuri.split():
-                if not s.startswith("file://"):
-                    continue
-                local = bb.data.expand(bb.fetch.localpath(s, d), d)
-                for mp in paths:
-                    if local.startswith(mp):
-                        #bb.note("overriding PACKAGE_ARCH from %s to %s" % (pkg_arch, mach_arch))
-                        bb.data.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}", d)
-                        bb.data.setVar('MULTIMACH_ARCH', mach_arch, d)
-                        return
+    if override != '0' and is_machine_specific(d):
+        bb.data.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}", d)
+        bb.data.setVar('MULTIMACH_ARCH', mach_arch, d)
+        return

    multiarch = pkg_arch
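
A note on the unpack rework above: the fetcher cannot expand globs, so
base_do_unpack now resolves file:// wildcards itself against FILESPATH. A
recipe fragment like the following (file names hypothetical) unpacks every
match from the first FILESPATH directory containing the subdirectory:

    SRC_URI = "file://configs/*"
    # each glob() match is handed to oe_unpack(), which places it in
    # ${WORKDIR}/configs/ via oe.unpack.unpack_file()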
diff --git a/classes/cmake.bbclass b/classes/cmake.bbclass
index 2047b58b76..29248e6fe1 100644
--- a/classes/cmake.bbclass
+++ b/classes/cmake.bbclass
@@ -28,6 +28,7 @@ cmake_do_generate_toolchain_file() {
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
echo "set( CMAKE_SYSTEM_NAME" `echo ${SDK_OS} | sed 's/^./\u&/'` ")" > ${WORKDIR}/toolchain.cmake
+ echo "set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_C_FLAGS \"${OECMAKE_C_FLAGS}\" CACHE STRING \"OpenEmbedded CFLAGS\" )" >> ${WORKDIR}/toolchain.cmake
@@ -37,7 +38,7 @@ cmake_do_generate_toolchain_file() {
# only search in the paths provided (from openembedded) so cmake doesnt pick
# up libraries and tools from the native build machine
- echo "set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} )" >> ${WORKDIR}/toolchain.cmake
+ echo "set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${STAGING_DIR_NATIVE}${prefix_native}/${BASE_PACKAGE_ARCH} )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )" >> ${WORKDIR}/toolchain.cmake
echo "set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )" >> ${WORKDIR}/toolchain.cmake
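
With the two added lines, the generated ${WORKDIR}/toolchain.cmake begins
roughly as follows for an ARM target (values illustrative, not taken from a
real build):

    set( CMAKE_SYSTEM_NAME Linux )
    set( CMAKE_SYSTEM_PROCESSOR arm )
    set( CMAKE_FIND_ROOT_PATH <staging host> <staging native> <staging native>${prefix_native}/arm )
    set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )

so find_* calls still resolve only inside OE staging, now including the
relocated per-arch cross tools instead of the old ${CROSS_DIR}.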
diff --git a/classes/cpan-base.bbclass b/classes/cpan-base.bbclass
index d65b5d944a..379defadb2 100644
--- a/classes/cpan-base.bbclass
+++ b/classes/cpan-base.bbclass
@@ -10,7 +10,7 @@ RDEPENDS += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
# Determine the staged version of perl from the perl configuration file
def get_perl_version(d):
import re
- cfg = bb.data.expand('${STAGING_LIBDIR}/perl/config.sh', d)
+ cfg = bb.data.expand('${STAGING_LIBDIR}/perl/config.sh', d)
try:
f = open(cfg, 'r')
except IOError:
diff --git a/classes/cpan.bbclass b/classes/cpan.bbclass
index dfc092601e..24e0c6e326 100644
--- a/classes/cpan.bbclass
+++ b/classes/cpan.bbclass
@@ -13,6 +13,8 @@ export PERL_INC = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}/CORE"
export PERL_LIB = "${STAGING_DATADIR}/perl/${@get_perl_version(d)}"
export PERL_ARCHLIB = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}"
+NATIVE_INSTALL_WORKS = "1"
+
cpan_do_configure () {
    yes '' | perl Makefile.PL ${EXTRA_CPANFLAGS}
    if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
@@ -31,16 +33,7 @@ cpan_do_compile () {
}
cpan_do_install () {
-    if [ ${@is_target(d)} = "yes" ]; then
-        oe_runmake install_vendor
-    fi
-}
-
-cpan_do_stage () {
-    if [ ${@is_target(d)} = "no" ]; then
-        oe_runmake install_vendor
-    fi
+    oe_runmake DESTDIR="${D}" install_vendor
}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install do_stage
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/classes/cpan_build.bbclass b/classes/cpan_build.bbclass
index 56f58168c7..15c93fb56d 100644
--- a/classes/cpan_build.bbclass
+++ b/classes/cpan_build.bbclass
@@ -11,7 +11,7 @@ NATIVE_INSTALL_WORKS = "1"
# this class, but uses itself as the provider of
# libmodule-build-perl)
#
-def cpan_build_dep_prepend(d):
+def cpan_build_deps(d):
if bb.data.getVar('CPAN_BUILD_DEPS', d, 1):
return ''
pn = bb.data.getVar('PN', d, 1)
@@ -19,7 +19,7 @@ def cpan_build_dep_prepend(d):
return ''
return 'libmodule-build-perl-native '
-DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
+DEPENDS_prepend = "${@cpan_build_deps(d)}"
cpan_build_do_configure () {
if [ ${@is_target(d)} == "yes" ]; then
diff --git a/classes/cross.bbclass b/classes/cross.bbclass
index 69f6511753..d3b27e6cf0 100644
--- a/classes/cross.bbclass
+++ b/classes/cross.bbclass
@@ -1,5 +1,5 @@
-# Disabled for now since the relocation paths are too long
-#inherit relocatable
+# We want cross packages to be relocatable
+inherit relocatable
# Cross packages are built indirectly via dependency,
# no need for them to be a direct target of 'world'
@@ -17,8 +17,6 @@ PACKAGE_ARCH = "${OLD_PACKAGE_ARCH}"
OLD_BASE_PACKAGE_ARCH := "${BASE_PACKAGE_ARCH}"
BASE_PACKAGE_ARCH = "${OLD_BASE_PACKAGE_ARCH}"
-PACKAGES = ""
-
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
HOST_OS = "${BUILD_OS}"
@@ -46,20 +44,20 @@ target_base_libdir := "${base_libdir}"
target_prefix := "${prefix}"
# Overrides for paths
-prefix = "${CROSS_DIR}"
-base_prefix = "${prefix}"
+base_prefix = "${STAGING_DIR_NATIVE}"
+prefix = "${base_prefix}${prefix_native}/${BASE_PACKAGE_ARCH}"
exec_prefix = "${prefix}"
base_sbindir = "${base_prefix}/bin"
sbindir = "${exec_prefix}/bin"
+# staging should be special for cross
+STAGING_DIR_HOST = ""
+SHLIBSDIR = "${STAGING_DIR_NATIVE}/shlibs"
+
do_install () {
oe_runmake 'DESTDIR=${D}' install
}
-do_stage () {
- autotools_stage_all
-}
-
#
# Override the default sysroot staging copy since this won't look like a target system
#
@@ -77,5 +75,7 @@ sysroot_stage_all() {
#
# Cross .la files have more path issues we have to correct
-SYSROOTEXTRALIBDIRSED = '-e "/^libdir=/s,.*,libdir=${STAGING_DIR_TARGET}${target_libdir},g"'
+SYSROOTEXTRALIBDIRSED = '-e "/^libdir=/s,.*,libdir=${STAGING_DIR_TARGET}${target_libdir},g" \
+ -e "/^dependency_libs=/s,\([[:space:]']\)-L${STAGING_LIBDIR_NATIVE},,g" \
+'
diff --git a/classes/crosssdk.bbclass b/classes/crosssdk.bbclass
index 0b1af8fc2f..452df4cd35 100644
--- a/classes/crosssdk.bbclass
+++ b/classes/crosssdk.bbclass
@@ -12,4 +12,4 @@ TARGET_OS = "${SDK_OS}"
TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
-
+STAGING_DIR_HOST = "${STAGING_DIR_SDK}"
diff --git a/classes/debian.bbclass b/classes/debian.bbclass
index 8f6e7d88cf..1a2c7da441 100644
--- a/classes/debian.bbclass
+++ b/classes/debian.bbclass
@@ -54,7 +54,7 @@ python debian_package_name_hook () {
            for f in files:
                if so_re.match(f):
                    fp = os.path.join(root, f)
-                    cmd = (bb.data.getVar('BUILD_PREFIX', d, 1) or "") + "objdump -p " + fp + " 2>/dev/null"
+                    cmd = "PATH=" + bb.data.getVar('PATH', d, 1) + " " + (bb.data.getVar('TARGET_PREFIX', d, 1) or "") + "objdump -p " + fp + " 2>/dev/null"
                    fd = os.popen(cmd)
                    lines = fd.readlines()
                    fd.close()
diff --git a/classes/distribute_license.bbclass b/classes/distribute_license.bbclass
new file mode 100644
index 0000000000..b9bde772c6
--- /dev/null
+++ b/classes/distribute_license.bbclass
@@ -0,0 +1,51 @@
+# distribute_license.bbclass will search the sources of a package to
+# a given depth looking for a match to the specified pattern and if
+# found will copy the matching file(s) to the deploy directory.
+#
+# This class is used to collect license files such as COPYING or
+# LICENSE where they are found and save them per package.
+#
+# This class uses the following variables to control its operations:
+# - LICENSE_FILES = Pattern of license files to be searched for.
+#                   By default this is COPYING* and LICENSE* but
+#                   this can be changed per package.
+# - LICENSE_SEARCH_DEPTH = The maximum depth to search in the package
+#                          sources for files matching the LICENSE_FILES
+#                          pattern.
+
+
+# Files to copy for the licensing. By default this is looking for
+# files following the patterns COPYING* or LICENSE* in the top
+# level sources directory.
+LICENSE_FILES ?= "COPYING* LICENSE*"
+
+# Maximum depth to look for license files
+LICENSE_SEARCH_DEPTH ?= "1"
+
+distribute_license_do_copy_license() {
+    # Turn off globbing so that wildcards are not expanded in for loop
+    set -f
+
+    # Check if LICENSE_FILES exist. If so copy them to DEPLOY_DIR.
+    # Keep the relative path of licenses the same in the DEPLOY_DIR.
+    for lic in ${LICENSE_FILES}
+    do
+        find ${S} -maxdepth ${LICENSE_SEARCH_DEPTH} -name "$lic" | \
+        while read f
+        do
+            bn=$(basename $f)
+            bd=$(dirname $f | sed -e "s|${S}||")
+            install -D $f ${DEPLOY_DIR}/licenses/${PN}/$bd/$bn
+        done
+    done
+
+    # Turn globbing back on
+    set +f
+}
+
+EXPORT_FUNCTIONS do_copy_license
+
+# Put after do_patch in case a patch adds the license files
+do_copy_license[deptask] = "do_patch"
+
+addtask copy_license after do_patch before do_configure
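
After do_copy_license runs, matching files land under the deploy tree with
their in-source relative paths preserved. For a hypothetical recipe "foo"
with the default LICENSE_FILES and LICENSE_SEARCH_DEPTH = "2", the result
would look like:

    ${DEPLOY_DIR}/licenses/foo/COPYING
    ${DEPLOY_DIR}/licenses/foo/docs/LICENSE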
diff --git a/classes/dsmg600-image.bbclass b/classes/dsmg600-image.bbclass
index 16c6b6eeb0..86ddebee99 100644
--- a/classes/dsmg600-image.bbclass
+++ b/classes/dsmg600-image.bbclass
@@ -1,6 +1,6 @@
dsmg600_pack_image () {
install -d ${DEPLOY_DIR_IMAGE}/firmupgrade
- install -m 0755 ${DEPLOY_DIR_IMAGE}/zImage-nslu2${SITEINFO_ENDIANESS}.bin \
+ install -m 0755 ${DEPLOY_DIR_IMAGE}/zImage-nslu2${SITEINFO_ENDIANNESS}.bin \
${DEPLOY_DIR_IMAGE}/firmupgrade/ip-ramdisk
install -m 0644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \
${DEPLOY_DIR_IMAGE}/firmupgrade/rootfs.gz
diff --git a/classes/gettext.bbclass b/classes/gettext.bbclass
index a40e74f819..968531b4ed 100644
--- a/classes/gettext.bbclass
+++ b/classes/gettext.bbclass
@@ -4,14 +4,14 @@ def gettext_after_parse(d):
cfg = oe_filter_out('^--(dis|en)able-nls$', bb.data.getVar('EXTRA_OECONF', d, 1) or "", d)
cfg += " --disable-nls"
depends = bb.data.getVar('DEPENDS', d, 1) or ""
- bb.data.setVar('DEPENDS', oe_filter_out('^(virtual/libiconv|virtual/libintl)$', depends, d), d)
+ bb.data.setVar('DEPENDS', oe_filter_out('^(gettext|gettext-native)$', depends, d), d)
bb.data.setVar('EXTRA_OECONF', cfg, d)
python () {
gettext_after_parse(d)
}
-DEPENDS_GETTEXT = "gettext gettext-native"
+DEPENDS_GETTEXT = "gettext gettext-native virtual/libiconv virtual/libintl"
DEPENDS =+ "${DEPENDS_GETTEXT}"
EXTRA_OECONF += "--enable-nls"
diff --git a/classes/gitpkgv.bbclass b/classes/gitpkgv.bbclass
new file mode 100644
index 0000000000..bc1dc32561
--- /dev/null
+++ b/classes/gitpkgv.bbclass
@@ -0,0 +1,41 @@
+# gitpkgv.bbclass provides a GITPKGV variable which is a sortable version
+# with the format NN+GITHASH, to be used in PKGV, where
+#
+# NN equals the total number of revs up to SRCREV
+# GITHASH is SRCREV's (full) hash
+#
+# gitpkgv.bbclass assumes the git repository has been cloned, and contains
+# SRCREV. So ${GITPKGV} should never be used in PV, only in PKGV.
+# It can handle SRCREV = ${AUTOREV}, as well as SRCREV = "<some fixed git hash>"
+#
+# use example:
+#
+# inherit gitpkgv
+#
+# PV = "1.0+git${SRCPV}"
+# PKGV = "1.0+git${GITPKGV}"
+
+GITPKGV = "${@get_git_pkgv(d)}"
+
+def get_git_pkgv(d):
+    import os
+    import bb
+
+    urls = bb.data.getVar('SRC_URI', d, 1).split()
+
+    for url in urls:
+        (type, host, path, user, pswd, parm) = bb.decodeurl(bb.data.expand(url, d))
+        if type in ['git']:
+
+            gitsrcname = '%s%s' % (host, path.replace('/', '.'))
+            repodir = os.path.join(bb.data.expand('${GITDIR}', d), gitsrcname)
+            rev = bb.fetch.get_srcrev(d).split('+')[1]
+
+            cwd = os.getcwd()
+            os.chdir(repodir)
+            output = bb.fetch.runfetchcmd("git rev-list %s -- 2> /dev/null | wc -l" % rev, d, quiet=True)
+            os.chdir(cwd)
+
+            return "%s+%s" % (output.split()[0], rev)
+
+    return "0+0"
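
As a concrete illustration (hash and count hypothetical): if SRCREV resolves
to commit deadbeef... preceded by 137 revisions, get_git_pkgv() returns
"137+deadbeef...", so a recipe using

    PKGV = "1.0+git${GITPKGV}"

gets package versions that sort monotonically as history grows, unlike a
bare commit hash.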
diff --git a/classes/gitver.bbclass b/classes/gitver.bbclass
index 5b4ba8d1e1..445067ec80 100644
--- a/classes/gitver.bbclass
+++ b/classes/gitver.bbclass
@@ -38,9 +38,12 @@ def get_git_pv(path, d, tagadjust=None):
    mark_dependency(d, os.path.join(gitdir, "HEAD"))

    ref = popen(["git", "symbolic-ref", "HEAD"])
-    reffile = os.path.join(gitdir, ref)
-    if ref and os.path.exists(reffile):
-        mark_dependency(d, reffile)
+    if ref:
+        reffile = os.path.join(gitdir, ref)
+        if os.path.exists(reffile):
+            mark_dependency(d, reffile)
+        else:
+            mark_dependency(d, os.path.join(gitdir, "index"))
    else:
        # The ref might be hidden in packed-refs. Force a reparse if anything
        # in the working copy changes.
diff --git a/classes/glibc-package.bbclass b/classes/glibc-package.bbclass
index d47c914b36..36424d50f0 100644
--- a/classes/glibc-package.bbclass
+++ b/classes/glibc-package.bbclass
@@ -104,11 +104,11 @@ do_prep_locale_tree() {
        gunzip $i
    done
    ls -d ${PKGD}${base_libdir}/* | xargs -iBLAH cp -pPR BLAH $treedir/lib
-    if [ -f ${CROSS_DIR}/${TARGET_SYS}/lib/libgcc_s.so ]; then
-        cp -pPR ${CROSS_DIR}/${TARGET_SYS}/lib/libgcc_s.so $treedir/lib
+    if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.so ]; then
+        cp -pPR ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.so $treedir/lib
    fi
-    if [ -f ${CROSS_DIR}/${TARGET_SYS}/lib/libgcc_s.so.* ]; then
-        cp -pPR ${CROSS_DIR}/${TARGET_SYS}/lib/libgcc_s.so.* $treedir/lib
+    if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.so.* ]; then
+        cp -pPR ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.so.* $treedir/lib
    fi
    install -m 0755 ${PKGD}${bindir}/localedef $treedir/bin
}
diff --git a/classes/icecc.bbclass b/classes/icecc.bbclass
index fb6045d21b..cd5a25bf28 100644
--- a/classes/icecc.bbclass
+++ b/classes/icecc.bbclass
@@ -46,7 +46,8 @@ def create_cross_env(bb,d):
        return ""

    import tarfile, socket, time
-    ice_dir = bb.data.expand('${CROSS_DIR}', d)
+    ice_dir = bb.data.expand('${STAGING_DIR_NATIVE}${prefix_native}/${BASE_PACKAGE_ARCH}', d)
+    staging_dir = bb.data.expand('${STAGING_DIR_TARGET}', d)
    prefix = bb.data.expand('${HOST_PREFIX}' , d)
    distro = bb.data.expand('${DISTRO}', d)
    target_sys = bb.data.expand('${TARGET_SYS}', d)
@@ -57,12 +58,12 @@ def create_cross_env(bb,d):
    # Stupid check to determine if we have built a libc and a cross
    # compiler.
    try:
-        os.stat(os.path.join(ice_dir, target_sys, 'lib', 'libstdc++.so'))
-        os.stat(os.path.join(ice_dir, target_sys, 'bin', 'g++'))
+        os.stat(os.path.join(staging_dir, 'usr', 'lib', 'libstdc++.so'))
+        os.stat(os.path.join(ice_dir, 'bin', "%s-g++" % target_sys))
    except: # no cross compiler built yet
        return ""

-    VERSION = icc_determine_gcc_version( os.path.join(ice_dir,target_sys,"bin","g++") )
+    VERSION = icc_determine_gcc_version( os.path.join(ice_dir,"bin","%s-g++" % target_sys) )
    cross_name = prefix + distro + "-" + target_sys + "-" + float + "-" + VERSION + "-" + name
    tar_file = os.path.join(ice_dir, 'ice', cross_name + '.tar.gz')
@@ -95,7 +96,7 @@ def create_cross_env(bb,d):
def create_native_env(bb,d):
import tarfile, socket, time
- ice_dir = bb.data.expand('${CROSS_DIR}', d)
+ ice_dir = bb.data.expand('${STAGING_DIR_NATIVE}${prefix_native}/${BASE_PACKAGE_ARCH}', d)
prefix = bb.data.expand('${HOST_PREFIX}' , d)
distro = bb.data.expand('${DISTRO}', d)
target_sys = bb.data.expand('${TARGET_SYS}', d)
@@ -142,7 +143,7 @@ def get_cross_kernel_cc(bb,d):
def create_cross_kernel_env(bb,d):
import tarfile, socket, time
- ice_dir = bb.data.expand('${CROSS_DIR}', d)
+ ice_dir = bb.data.expand('${STAGING_DIR_NATIVE}${prefix_native}/${BASE_PACKAGE_ARCH}', d)
prefix = bb.data.expand('${HOST_PREFIX}' , d)
distro = bb.data.expand('${DISTRO}', d)
target_sys = bb.data.expand('${TARGET_SYS}', d)
@@ -159,7 +160,7 @@ def create_cross_kernel_env(bb,d):
return ""
VERSION = icc_determine_gcc_version( os.path.join(ice_dir,"bin",kernel_cc) )
- cross_name = prefix + distro + "-" + target_sys + "-" + float + "-" + VERSION + "-" + name
+ cross_name = prefix + distro + "-kernel-" + target_sys + "-" + float + "-" + VERSION + "-" + name
tar_file = os.path.join(ice_dir, 'ice', cross_name + '.tar.gz')
try:
@@ -319,3 +320,7 @@ do_configure_prepend() {
do_compile_prepend() {
set_icecc_env
}
+
+do_install_prepend() {
+ set_icecc_env
+}
diff --git a/classes/image.bbclass b/classes/image.bbclass
index f695d3c4df..2105195003 100644
--- a/classes/image.bbclass
+++ b/classes/image.bbclass
@@ -205,7 +205,7 @@ log_check() {
# can decide if they want it or not
zap_root_password () {
- sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
+ sed 's%^root:[^:]*:%root::%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
}
diff --git a/classes/insane.bbclass b/classes/insane.bbclass
index 2118a27fbd..b8743d1918 100644
--- a/classes/insane.bbclass
+++ b/classes/insane.bbclass
@@ -32,58 +32,59 @@ PACKAGEFUNCS += " do_package_qa "
def package_qa_get_machine_dict():
    return {
        "darwin9" : {
-            "arm" : (40, 0, 0, True, True),
+            "arm" : ( 40, 0, 0, True, True),
        },
        "linux" : {
-            "arm" : (40, 97, 0, True, True),
-            "armeb": (40, 97, 0, False, True),
-            "powerpc": (20, 0, 0, False, True),
-            "i386": ( 3, 0, 0, True, True),
-            "i486": ( 3, 0, 0, True, True),
-            "i586": ( 3, 0, 0, True, True),
-            "i686": ( 3, 0, 0, True, True),
-            "x86_64": (62, 0, 0, True, False),
-            "ia64": (50, 0, 0, True, False),
-            "alpha": (36902, 0, 0, True, False),
-            "hppa": (15, 3, 0, False, True),
-            "m68k": ( 4, 0, 0, False, True),
-            "mips": ( 8, 0, 0, False, True),
-            "mipsel": ( 8, 0, 0, True, True),
-            "s390": (22, 0, 0, False, True),
-            "sh4": (42, 0, 0, True, True),
-            "sparc": ( 2, 0, 0, False, True),
+            "arm" : ( 40, 97, 0, True, True),
+            "armeb": ( 40, 97, 0, False, True),
+            "i386": ( 3, 0, 0, True, True),
+            "i486": ( 3, 0, 0, True, True),
+            "i586": ( 3, 0, 0, True, True),
+            "i686": ( 3, 0, 0, True, True),
+            "x86_64": ( 62, 0, 0, True, False),
+            "ia64": ( 50, 0, 0, True, False),
+            "alpha": (36902, 0, 0, True, False),
+            "hppa": ( 15, 3, 0, False, True),
+            "m68k": ( 4, 0, 0, False, True),
+            "mips": ( 8, 0, 0, False, True),
+            "mipsel": ( 8, 0, 0, True, True),
+            "nios2": ( 113, 0, 0, True, True),
+            "powerpc": ( 20, 0, 0, False, True),
+            "s390": ( 22, 0, 0, False, True),
+            "sh4": ( 42, 0, 0, True, True),
+            "sparc": ( 2, 0, 0, False, True),
        },
        "linux-uclibc" : {
-            "arm" : ( 40, 97, 0, True, True),
-            "armeb": ( 40, 97, 0, False, True),
-            "powerpc": ( 20, 0, 0, False, True),
-            "i386": ( 3, 0, 0, True, True),
-            "i486": ( 3, 0, 0, True, True),
-            "i586": ( 3, 0, 0, True, True),
-            "i686": ( 3, 0, 0, True, True),
-            "x86_64": ( 62, 0, 0, True, False),
-            "mips": ( 8, 0, 0, False, True),
-            "mipsel": ( 8, 0, 0, True, True),
-            "avr32": (6317, 0, 0, False, True),
-            "sh4": (42, 0, 0, True, True),
-
+            "arm" : ( 40, 97, 0, True, True),
+            "armeb": ( 40, 97, 0, False, True),
+            "avr32": ( 6317, 0, 0, False, True),
+            "i386": ( 3, 0, 0, True, True),
+            "i486": ( 3, 0, 0, True, True),
+            "i586": ( 3, 0, 0, True, True),
+            "i686": ( 3, 0, 0, True, True),
+            "x86_64": ( 62, 0, 0, True, False),
+            "mips": ( 8, 0, 0, False, True),
+            "mipsel": ( 8, 0, 0, True, True),
+            "nios2": ( 113, 0, 0, True, True),
+            "powerpc": ( 20, 0, 0, False, True),
+            "sh4": ( 42, 0, 0, True, True),
        },
        "uclinux-uclibc" : {
-            "bfin": ( 106, 0, 0, True, True),
+            "bfin": ( 106, 0, 0, True, True),
        },
        "linux-gnueabi" : {
-            "arm" : (40, 0, 0, True, True),
-            "armeb" : (40, 0, 0, False, True),
+            "arm" : ( 40, 0, 0, True, True),
+            "armeb" : ( 40, 0, 0, False, True),
        },
        "linux-uclibceabi" : {
-            "arm" : (40, 0, 0, True, True),
-            "armeb" : (40, 0, 0, False, True),
+            "arm" : ( 40, 0, 0, True, True),
+            "armeb" : ( 40, 0, 0, False, True),
        },
        "linux-gnuspe" : {
-            "powerpc": (20, 0, 0, False, True),
+            "powerpc": ( 20, 0, 0, False, True),
        },
        "linux-uclibcspe" : {
-            "powerpc": (20, 0, 0, False, True),
+            "powerpc": ( 20, 0, 0, False, True),
        },
    }
@@ -255,8 +256,10 @@ def package_qa_check_desktop(path, name, d, elf):
    Run all desktop files through desktop-file-validate.
    """
    sane = True
+    env_path = bb.data.getVar('PATH', d, True)
+
    if path.endswith(".desktop"):
-        output = os.popen("desktop-file-validate %s" % path)
+        output = os.popen("PATH=%s desktop-file-validate %s" % (env_path, path))
        # This only produces output on errors
        for l in output:
            sane = package_qa_handle_error(7, l.strip(), name, path, d)
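
The five fields in each tuple above appear to be the ELF machine id, OS/ABI,
ABI version, endianness (True = little) and word size (True = 32-bit), which
the arch check in this class compares against every packaged binary. A
stdlib-only sketch of reading the corresponding header fields (illustrative,
not the class's actual ELF reader):

    import struct

    def elf_header_fields(path):
        # e_ident[4] is EI_CLASS, e_ident[5] is EI_DATA; e_machine sits at
        # offset 18 in the byte order the file itself declares.
        with open(path, "rb") as f:
            header = f.read(20)
        ei_class, ei_data = struct.unpack("BB", header[4:6])
        bits32 = (ei_class == 1)        # ELFCLASS32
        littleendian = (ei_data == 1)   # ELFDATA2LSB
        endian = "<" if littleendian else ">"
        (machine,) = struct.unpack(endian + "H", header[18:20])
        return machine, littleendian, bits32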
diff --git a/classes/java-library.bbclass b/classes/java-library.bbclass
index 06c3170544..904b4f16be 100644
--- a/classes/java-library.bbclass
+++ b/classes/java-library.bbclass
@@ -59,6 +59,7 @@ java_install() {
do_install() {
java_install
+ java_stage
}
java_stage() {
diff --git a/classes/java-native.bbclass b/classes/java-native.bbclass
index 7b67c6041c..cade7b18de 100644
--- a/classes/java-native.bbclass
+++ b/classes/java-native.bbclass
@@ -6,6 +6,4 @@
inherit native
-do_stage () {
- java_stage
-}
+NATIVE_INSTALL_WORKS = "1"
diff --git a/classes/kernel-arch.bbclass b/classes/kernel-arch.bbclass
index 8894fa554c..d615fb727c 100644
--- a/classes/kernel-arch.bbclass
+++ b/classes/kernel-arch.bbclass
@@ -7,12 +7,13 @@
valid_archs = "alpha cris ia64 \
x86_64 i386 x86 \
m68knommu m68k ppc powerpc ppc64 \
- sparc sparc64 \
+ sparc sparc64 \
arm arm26 \
m32r mips \
- sh sh64 um h8300 \
- parisc s390 v850 \
- avr32 blackfin"
+ nios2 \
+ sh sh64 um h8300 \
+ parisc s390 v850 \
+ avr32 blackfin"
def map_kernel_arch(a, d):
import re
@@ -25,7 +26,7 @@ def map_kernel_arch(a, d):
elif re.match('mipsel$', a): return 'mips'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
- elif a in valid_archs: return a
+ elif a in valid_archs: return a
else:
bb.error("cannot map '%s' to a linux kernel architecture" % a)
diff --git a/classes/kernel.bbclass b/classes/kernel.bbclass
index a157dfc0e9..9555f30a30 100644
--- a/classes/kernel.bbclass
+++ b/classes/kernel.bbclass
@@ -14,7 +14,7 @@ python __anonymous () {
    kerneltype = bb.data.getVar('KERNEL_IMAGETYPE', d, 1) or ''
    if kerneltype == 'uImage':
        depends = bb.data.getVar("DEPENDS", d, 1)
-        depends = "%s u-boot-mkimage-openmoko-native" % depends
+        depends = "%s u-boot-mkimage-native" % depends
        bb.data.setVar("DEPENDS", depends, d)

    image = bb.data.getVar('INITRAMFS_IMAGE', d, True)
@@ -476,6 +476,7 @@ python populate_packages_prepend () {
            bb.data.setVar('ALLOW_EMPTY_' + metapkg, "1", d)
            bb.data.setVar('FILES_' + metapkg, "", d)
            blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux' ]
+            depchains = (d.getVar("DEPCHAIN_POST", True) or "").split()
            for l in module_deps.values():
                for i in l:
                    pkg = module_pattern % legitimize_package_name(re.match(module_regex, os.path.basename(i)).group(1))
@@ -483,7 +484,7 @@ python populate_packages_prepend () {
            metapkg_rdepends = []
            packages = bb.data.getVar('PACKAGES', d, 1).split()
            for pkg in packages[1:]:
-                if not pkg in blacklist and not pkg in metapkg_rdepends:
+                if not pkg in blacklist and not pkg in metapkg_rdepends and not any(pkg.endswith(post) for post in depchains):
                    metapkg_rdepends.append(pkg)
            bb.data.setVar('RDEPENDS_' + metapkg, ' '.join(metapkg_rdepends), d)
            bb.data.setVar('DESCRIPTION_' + metapkg, 'Kernel modules meta package', d)
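
The depchains filter added above keeps DEPCHAIN_POST-generated subpackages
out of the kernel-modules metapackage. Illustrative Python (package names
hypothetical; "-dev -dbg" is a typical DEPCHAIN_POST value):

    depchains = ["-dev", "-dbg"]
    packages = ["kernel-module-foo", "kernel-module-foo-dev"]
    kept = [p for p in packages
            if not any(p.endswith(post) for post in depchains)]
    # kept == ['kernel-module-foo']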
diff --git a/classes/klibc.bbclass b/classes/klibc.bbclass
new file mode 100644
index 0000000000..d68436728c
--- /dev/null
+++ b/classes/klibc.bbclass
@@ -0,0 +1,9 @@
+# klcc-cross depends on klibc
+DEPENDS =+ "klcc-cross"
+
+export CC=${TARGET_PREFIX}klcc
+
+# reset inherited OE flags to avoid e.g. ggdb3 and keep size small
+export CFLAGS=""
+export CPPFLAGS=""
+export LDFLAGS=""
diff --git a/classes/magicbox-image.bbclass b/classes/magicbox-image.bbclass
index c75e69cac2..37f4c7947f 100644
--- a/classes/magicbox-image.bbclass
+++ b/classes/magicbox-image.bbclass
@@ -1,3 +1,5 @@
+DEPENDS += "u-boot-mkimage-native"
+
magicbox_gen_images() {
# find latest kernel
KERNEL=`ls -tr ${DEPLOY_DIR_IMAGE}/uImage* | tail -n 1`
@@ -9,7 +11,7 @@ magicbox_gen_images() {
#squashfs
#We need to prep the image so that u-boot recognizes it
mv ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs.bin
- ${STAGING_BINDIR_NATIVE}/mkimage -A ppc -O linux -T ramdisk -C none -n "OPLinux-uclibc-squashfs" \
+ ${STAGING_BINDIR_NATIVE}/uboot-mkimage -A ppc -O linux -T ramdisk -C none -n "OPLinux-uclibc-squashfs" \
-d ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs.bin ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs.bin
@@ -17,7 +19,7 @@ magicbox_gen_images() {
#squashfs-lzma
#same as squashfs
mv ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzma ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs-lzma.bin
- ${STAGING_BINDIR_NATIVE}/mkimage -A ppc -O linux -T ramdisk -C none -n "OPLinux-uclibc-squashfs-lzma" \
+ ${STAGING_BINDIR_NATIVE}/uboot-mkimage -A ppc -O linux -T ramdisk -C none -n "OPLinux-uclibc-squashfs-lzma" \
-d ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs-lzma.bin ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzma
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.squashfs-lzma.bin
diff --git a/classes/module-base.bbclass b/classes/module-base.bbclass
index bc53e1bad5..9aaaa4e8e3 100644
--- a/classes/module-base.bbclass
+++ b/classes/module-base.bbclass
@@ -7,7 +7,12 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
# A machine.conf or local.conf can increase MACHINE_KERNEL_PR to force
# rebuilds for kernel and external modules
-PR = "${MACHINE_KERNEL_PR}"
+python __anonymous () {
+    machine_kernel_pr = bb.data.getVar('MACHINE_KERNEL_PR', d, True)
+
+    if machine_kernel_pr:
+        bb.data.setVar('PR', machine_kernel_pr, d)
+}
export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
export KERNEL_SOURCE = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-source')}"
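
With the anonymous function above, PR is only overridden when the machine
actually defines MACHINE_KERNEL_PR; other recipes keep their own PR. A
hypothetical machine configuration using it:

    # conf/machine/mymachine.conf (illustrative)
    MACHINE_KERNEL_PR = "r11"
    # -> the kernel and all external module recipes rebuild with PR = "r11"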
diff --git a/classes/module_strip.bbclass b/classes/module_strip.bbclass
index 2650f71d50..998fb86461 100644
--- a/classes/module_strip.bbclass
+++ b/classes/module_strip.bbclass
@@ -1,5 +1,8 @@
PACKAGESTRIPFUNCS += "do_strip_modules"
+# may be inherited by kernel.bbclass which sets KERNEL_MAJOR_VERSION
+KERNEL_MAJOR_VERSION ?= "${@get_kernelmajorversion('${KERNEL_VERSION}')}"
+
do_strip_modules () {
    if test -e ${PKGD}/lib/modules; then
        if [ "${KERNEL_MAJOR_VERSION}" == "2.6" ]; then
diff --git a/classes/mozilla.bbclass b/classes/mozilla.bbclass
index 4e3054b9ab..2b2f770d58 100644
--- a/classes/mozilla.bbclass
+++ b/classes/mozilla.bbclass
@@ -1,5 +1,5 @@
SECTION = "x11/utils"
-DEPENDS += "gnu-config-native virtual/libintl xt libxi \
+DEPENDS += "gnu-config-native virtual/libintl libxt libxi \
zip-native gtk+ orbit2 libidl-native"
LICENSE = "MPL NPL"
SRC_URI += "file://mozconfig"
diff --git a/classes/nas100d-image.bbclass b/classes/nas100d-image.bbclass
index 0877b4f84d..6c147cc0fb 100644
--- a/classes/nas100d-image.bbclass
+++ b/classes/nas100d-image.bbclass
@@ -1,6 +1,6 @@
nas100d_pack_image () {
install -d ${DEPLOY_DIR_IMAGE}/firmupgrade
- install -m 0755 ${DEPLOY_DIR_IMAGE}/zImage-nslu2${SITEINFO_ENDIANESS}.bin \
+ install -m 0755 ${DEPLOY_DIR_IMAGE}/zImage-nslu2${SITEINFO_ENDIANNESS}.bin \
${DEPLOY_DIR_IMAGE}/firmupgrade/ip-ramdisk
install -m 0644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \
${DEPLOY_DIR_IMAGE}/firmupgrade/rootfs.gz
diff --git a/classes/native.bbclass b/classes/native.bbclass
index 3437836de7..345bd8028d 100644
--- a/classes/native.bbclass
+++ b/classes/native.bbclass
@@ -91,6 +91,12 @@ ORIG_DEPENDS := "${DEPENDS}"
DEPENDS_virtclass-native ?= "${ORIG_DEPENDS}"
+def native_virtclass_add_override(d):
+    if "native" in (bb.data.getVar('BBCLASSEXTEND', d, True) or ""):
+        bb.data.setVar("OVERRIDES", bb.data.getVar("OVERRIDES", d, False) + ":virtclass-native", d)
+
+OVERRIDES .= "${@native_virtclass_add_override(d)}"
+
python __anonymous () {
# If we've a legacy native do_stage, we need to neuter do_install
stagefunc = bb.data.getVar('do_stage', d, True)
@@ -124,6 +130,5 @@ python __anonymous () {
provides = provides.replace(prov, prov + "-native")
bb.data.setVar("PROVIDES", provides, d)
- bb.data.setVar("OVERRIDES", bb.data.getVar("OVERRIDES", d, False) + ":virtclass-native", d)
}
diff --git a/classes/nativesdk.bbclass b/classes/nativesdk.bbclass
index 75f5790121..f3f993033f 100644
--- a/classes/nativesdk.bbclass
+++ b/classes/nativesdk.bbclass
@@ -18,8 +18,6 @@ python () {
#STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-nativesdk"
#STAGING_DIR_TARGET = "${STAGING_DIR}/${BASEPKG_TARGET_SYS}-nativesdk"
-CROSS_DIR = "${TMPDIR}/cross/${HOST_ARCH}"
-
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
HOST_OS = "${SDK_OS}"
@@ -54,6 +52,8 @@ export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
ORIG_DEPENDS := "${DEPENDS}"
DEPENDS_virtclass-nativesdk ?= "${ORIG_DEPENDS}"
+OVERRIDES .= ":virtclass-nativesdk"
+
python __anonymous () {
pn = bb.data.getVar("PN", d, True)
depends = bb.data.getVar("DEPENDS_virtclass-nativesdk", d, True)
@@ -76,7 +76,4 @@ python __anonymous () {
if not prov.endswith("-nativesdk"):
provides = provides.replace(prov, prov + "-nativesdk")
bb.data.setVar("PROVIDES", provides, d)
- bb.data.setVar("OVERRIDES", bb.data.getVar("OVERRIDES", d, False) + ":virtclass-nativesdk", d)
}
-
-
diff --git a/classes/openmoko-base.bbclass b/classes/openmoko-base.bbclass
index 8cbf7df884..f1ca536c54 100644
--- a/classes/openmoko-base.bbclass
+++ b/classes/openmoko-base.bbclass
@@ -13,7 +13,7 @@ def openmoko_base_get_subdir(d):
SUBDIR = "${@openmoko_base_get_subdir(d)}"
-SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
+SRC_URI = "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
S = "${WORKDIR}/${PN}"
FILES_${PN} += "${datadir}/icons"
diff --git a/classes/openmoko2.bbclass b/classes/openmoko2.bbclass
index 233c721ff7..1dfef64963 100644
--- a/classes/openmoko2.bbclass
+++ b/classes/openmoko2.bbclass
@@ -22,7 +22,7 @@ def openmoko_two_get_subdir(d):
LICENSE = "${@openmoko_two_get_license(d)}"
SUBDIR = "${@openmoko_two_get_subdir(d)}"
-SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
+SRC_URI = "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
S = "${WORKDIR}/${PN}"
FILES_${PN} += "${datadir}/icons"
diff --git a/classes/package.bbclass b/classes/package.bbclass
index 77a20bf9c5..c3781330bb 100644
--- a/classes/package.bbclass
+++ b/classes/package.bbclass
@@ -1029,23 +1029,23 @@ def package_run_hooks(f, d):
bb.parse.parse_py.BBHandler.feeder(line, l, fn, os.path.basename(fn), d)
line += 1
fp.close()
- anonqueue = bb.data.getVar("__anonqueue", d, True) or []
- body = [x['content'] for x in anonqueue]
- flag = { 'python' : 1, 'func' : 1 }
- bb.data.setVar("__anonfunc", "\n".join(body), d)
- bb.data.setVarFlags("__anonfunc", flag, d)
- try:
- t = bb.data.getVar('T', d)
- bb.data.setVar('T', '${TMPDIR}/', d)
- bb.build.exec_func("__anonfunc", d)
- bb.data.delVar('T', d)
- if t:
- bb.data.setVar('T', t, d)
- except Exception, e:
- bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e)
- raise
- bb.data.delVar("__anonqueue", d)
- bb.data.delVar("__anonfunc", d)
+ anonqueue = bb.data.getVar("__anonqueue", d, True) or []
+ body = [x['content'] for x in anonqueue]
+ flag = { 'python' : 1, 'func' : 1 }
+ bb.data.setVar("__anonfunc", "\n".join(body), d)
+ bb.data.setVarFlags("__anonfunc", flag, d)
+ try:
+ t = bb.data.getVar('T', d)
+ bb.data.setVar('T', '${TMPDIR}/', d)
+ bb.build.exec_func("__anonfunc", d)
+ bb.data.delVar('T', d)
+ if t:
+ bb.data.setVar('T', t, d)
+ except Exception, e:
+ bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e)
+ raise
+ bb.data.delVar("__anonqueue", d)
+ bb.data.delVar("__anonfunc", d)
python package_do_package () {
packages = (bb.data.getVar('PACKAGES', d, True) or "").split()
diff --git a/classes/package_ipk.bbclass b/classes/package_ipk.bbclass
index 435d59fb06..5d388dacd9 100644
--- a/classes/package_ipk.bbclass
+++ b/classes/package_ipk.bbclass
@@ -1,6 +1,5 @@
inherit package
-BOOTSTRAP_EXTRA_RDEPENDS += "opkg-collateral opkg"
IMAGE_PKGTYPE ?= "ipk"
IPKGCONF_TARGET = "${STAGING_ETCDIR_NATIVE}/opkg.conf"
@@ -132,6 +131,9 @@ package_generate_ipkg_conf () {
            echo "src oe-${SDK_SYS}-sdk-$arch file:${DEPLOY_DIR_IPK}/${SDK_SYS}-sdk-$arch" >> ${IPKGCONF_CANSDK}
        fi
    done
+    echo "lists_dir ext /var/lib/opkg" >> ${IPKGCONF_TARGET}
+    echo "lists_dir ext /var/lib/opkg" >> ${IPKGCONF_SDK}
+    echo "lists_dir ext /var/lib/opkg" >> ${IPKGCONF_CANSDK}
}
python do_package_ipk () {
@@ -196,7 +198,7 @@ python do_package_ipk () {
            except ValueError:
                pass
            if not g and bb.data.getVar('ALLOW_EMPTY', localdata) != "1":
-                bb.note("Not creating empty archive for %s-%s" % (pkg, bb.data.expand('${PV}-${PR}${DISTRO_PR}', localdata, True)))
+                bb.note("Not creating empty archive for %s-%s" % (pkg, bb.data.expand('${PV}-${PR}${DISTRO_PR}', localdata)))
                bb.utils.unlockfile(lf)
                continue
diff --git a/classes/packaged-staging.bbclass b/classes/packaged-staging.bbclass
index 56f9dc0dcc..85f08ee143 100644
--- a/classes/packaged-staging.bbclass
+++ b/classes/packaged-staging.bbclass
@@ -12,14 +12,14 @@
# bitbake.conf set PSTAGING_ACTIVE = "0", this class sets to "1" if we're active
#
PSTAGE_PKGVERSION = "${PV}-${PR}"
-PSTAGE_PKGARCH = "${BUILD_SYS}"
+PSTAGE_PKGARCH = "${PACKAGE_ARCH}-${HOST_OS}"
PSTAGE_EXTRAPATH ?= "/${OELAYOUT_ABI}/${DISTRO_PR}/"
PSTAGE_PKGPATH = "${DISTRO}${PSTAGE_EXTRAPATH}"
PSTAGE_PKGPN = "${@bb.data.expand('staging-${PN}-${MULTIMACH_ARCH}${TARGET_VENDOR}-${TARGET_OS}', d).replace('_', '-')}"
PSTAGE_PKGNAME = "${PSTAGE_PKGPN}_${PSTAGE_PKGVERSION}_${PSTAGE_PKGARCH}.ipk"
PSTAGE_PKG = "${PSTAGE_DIR}/${PSTAGE_PKGPATH}/${PSTAGE_PKGNAME}"
PSTAGE_WORKDIR = "${TMPDIR}/pstage"
-PSTAGE_SCAN_CMD ?= "find ${PSTAGE_TMPDIR_STAGE} \( -name "*.la" -o -name "*-config"\) -type f"
+PSTAGE_SCAN_CMD ?= "find ${PSTAGE_TMPDIR_STAGE} \( -name "*.la" -o -name "*-config" \) -type f"
PSTAGE_NATIVEDEPENDS = "\
shasum-native \
@@ -28,30 +28,21 @@ PSTAGE_NATIVEDEPENDS = "\
BB_STAMP_WHITELIST = "${PSTAGE_NATIVEDEPENDS}"
-def _package_unlink (f):
-    import os, errno
-    try:
-        os.unlink(f)
-        return True
-    except OSError, e:
-        if e.errno == errno.ENOENT:
-            return False
-        raise
-
-python () {
+python __anonymous() {
    pstage_allowed = True

-    # These classes encode staging paths into the binary data so can only be
-    # reused if the path doesn't change/
-    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
-        path = bb.data.getVar('PSTAGE_PKGPATH', d, 1)
-        path = path + bb.data.getVar('TMPDIR', d, 1).replace('/', '-')
-        bb.data.setVar('PSTAGE_PKGPATH', path, d)
-        scan_cmd = "grep -Irl ${STAGING_DIR} ${PSTAGE_TMDPDIR_STAGE}"
+    # We need PSTAGE_PKGARCH to contain information about the target.
+    if bb.data.inherits_class('cross', d):
+        bb.data.setVar('PSTAGE_PKGARCH', "${HOST_SYS}-${PACKAGE_ARCH}-${TARGET_OS}", d)
+
+    # These classes encode staging paths data files so we must mangle them
+    # for reuse.
+    if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('sdk', d):
+        scan_cmd = "grep -Irl ${STAGING_DIR} ${PSTAGE_TMPDIR_STAGE}"
        bb.data.setVar('PSTAGE_SCAN_CMD', scan_cmd, d)

-    # PSTAGE_NATIVEDEPENDS lists the packages we need before we can use packaged
-    # staging. There will always be some packages we depend on.
+    # PSTAGE_NATIVEDEPENDS lists the packages we need before we can use
+    # packaged staging. There will always be some packages we depend on.
    if bb.data.inherits_class('native', d):
        pn = bb.data.getVar('PN', d, True)
        nativedeps = bb.data.getVar('PSTAGE_NATIVEDEPENDS', d, True).split()
@@ -62,7 +53,10 @@ python () {
    if bb.data.inherits_class('image', d):
        pstage_allowed = False

-    if bb.data.getVar('PSTAGING_DISABLED', d, True) == "1":
+    # We need OVERRIDES to be evaluated and applied.
+    localdata = d.createCopy()
+    bb.data.update_data(localdata)
+    if localdata.getVar('PSTAGING_DISABLED', True) == "1":
        pstage_allowed = False

    # Add task dependencies if we're active, otherwise mark packaged staging
@@ -106,7 +100,7 @@ def pstage_manualclean(srcname, destvarname, d):
            if (file == "staging.lock"):
                continue
            filepath = os.path.join(walkroot, file).replace(src, dest)
-            _package_unlink(filepath)
+            oe.path.remove(filepath)
def pstage_set_pkgmanager(d):
path = bb.data.getVar("PATH", d, 1)
@@ -134,7 +128,6 @@ def pstage_cleanpackage(pkgname, d):
    else:
        bb.debug(1, "Manually removing any installed files from staging...")
        pstage_manualclean("sysroots", "STAGING_DIR", d)
-        pstage_manualclean("cross", "CROSS_DIR", d)
        pstage_manualclean("deploy", "DEPLOY_DIR", d)

    bb.utils.unlockfile(lf)
@@ -148,24 +141,23 @@ do_clean_prepend() {
    pstage_cleanpackage(removepkg, d)

    stagepkg = bb.data.expand("${PSTAGE_PKG}", d)
-    bb.note("Removing staging package %s" % base_path_out(stagepkg, d))
-    # Add a wildcard to the end of stagepkg to also get its md5
-    # if it's a fetched package
-    os.system('rm -rf ' + stagepkg + '*')
+    if os.path.exists(stagepkg):
+        bb.note("Removing staging package %s" % base_path_out(stagepkg, d))
+        oe.path.remove(stagepkg)
+        oe.path.remove(stagepkg + ".md5")
}
staging_helper () {
    # Assemble appropriate opkg.conf
    conffile=${PSTAGE_MACHCONFIG}
    mkdir -p ${PSTAGE_WORKDIR}/pstaging_lists
+    arch="${PSTAGE_PKGARCH}"
    if [ ! -e $conffile ]; then
-        ipkgarchs="${BUILD_SYS}"
-        priority=1
-        for arch in $ipkgarchs; do
-            echo "arch $arch $priority" >> $conffile
-            priority=$(expr $priority + 5)
-        done
+        echo "arch $arch 1" > $conffile
        echo "dest root /" >> $conffile
+    elif [ `grep -c " $arch " $conffile` -eq 0 ]; then
+        priority=$(expr `grep -cE "^arch" $conffile` + 1)
+        sed -i -e "/dest/iarch $arch $priority" $conffile
    fi
    if [ ! -e ${TMPDIR}${libdir_native}/opkg/info/ ]; then
        mkdir -p ${TMPDIR}${libdir_native}/opkg/info/
@@ -269,15 +261,13 @@ python packagestage_scenefunc () {
        # Remove the stamps and files we added above
        # FIXME - we should really only remove the stamps we added
-        for fname in glob.glob(stamp + '.*'):
-            _package_unlink(fname)
-
-        os.system(bb.data.expand("rm -rf ${WORKDIR}/tstage", d))
+        for fname in glob.glob(stamp + '.*'):
+            oe.path.remove(fname)
+        oe.path.remove(bb.data.expand("${WORKDIR}/tstage", d))

        if stageok:
            bb.note("Staging package found, using it for %s." % file)
            installcmd = bb.data.getVar("PSTAGE_INSTALL_CMD", d, 1)
-            lf = bb.utils.lockfile(bb.data.expand("${SYSROOT_LOCK}", d))
            ret = os.system("PATH=\"%s\" %s %s" % (path, installcmd, stagepkg))
            bb.utils.unlockfile(lf)
            if ret != 0:
@@ -306,15 +296,14 @@ python packagedstage_stampfixing_eventhandler() {
            # We're targeting a task which was skipped with packaged staging
            # so we need to remove the autogenerated stamps.
            for task in taskscovered:
-                dir = "%s.do_%s" % (e.stampPrefix[fn], task)
-                _package_unlink(dir)
-            _package_unlink(stamp)
+                covered = "%s.do_%s" % (e.stampPrefix[fn], task)
+                oe.path.remove(covered)
+            oe.path.remove(stamp)
}
populate_sysroot_preamble () {
    if [ "$PSTAGING_ACTIVE" = "1" ]; then
        stage-manager -p ${STAGING_DIR} -c ${PSTAGE_WORKDIR}/stamp-cache-staging -u || true
-        stage-manager -p ${CROSS_DIR} -c ${PSTAGE_WORKDIR}/stamp-cache-cross -u || true
    fi
}
@@ -330,21 +319,13 @@ populate_sysroot_postamble () {
        if [ "$exitcode" != "5" -a "$exitcode" != "0" ]; then
            exit $exitcode
        fi
-        stage-manager -p ${CROSS_DIR} -c ${PSTAGE_WORKDIR}/stamp-cache-cross -u -d ${PSTAGE_TMPDIR_STAGE}/cross/${BASE_PACKAGE_ARCH}
-        if [ "$exitcode" != "5" -a "$exitcode" != "0" ]; then
-            exit $exitcode
-        fi
        set -e
    fi
}

packagedstaging_fastpath () {
-    if [ "$PSTAGING_ACTIVE" = "1" ]; then
-        mkdir -p ${PSTAGE_TMPDIR_STAGE}/sysroots/
-        mkdir -p ${PSTAGE_TMPDIR_STAGE}/cross/${BASE_PACKAGE_ARCH}/
-        cp -fpPR ${SYSROOT_DESTDIR}/${STAGING_DIR}/* ${PSTAGE_TMPDIR_STAGE}/sysroots/ || /bin/true
-        cp -fpPR ${SYSROOT_DESTDIR}/${CROSS_DIR}/* ${PSTAGE_TMPDIR_STAGE}/cross/${BASE_PACKAGE_ARCH}/ || /bin/true
-    fi
+    mkdir -p ${PSTAGE_TMPDIR_STAGE}/sysroots/
+    cp -fpPR ${SYSROOT_DESTDIR}/${STAGING_DIR}/* ${PSTAGE_TMPDIR_STAGE}/sysroots/ || /bin/true
}
do_populate_sysroot[dirs] =+ "${PSTAGE_DIR}"
@@ -375,7 +356,7 @@ staging_packager () {
    if [ "$srcuri" == "" ]; then
        srcuri="OpenEmbedded"
    fi
-    echo "Source: ${SRC_URI}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
+    echo "Source: $srcuri" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
# Deal with libtool not supporting sysroots
# Need to remove hardcoded paths and fix these when we install the
@@ -418,7 +399,7 @@ python staging_package_libtoolhack () {
        fixmefd = open(fixmefn,"r")
        fixmefiles = fixmefd.readlines()
        fixmefd.close()
-        os.system('rm -f ' + fixmefn)
+        oe.path.remove(fixmefn)
        for file in fixmefiles:
            os.system("sed -i -e s:FIXMESTAGINGDIR:%s:g %s" % (staging, tmpdir + '/' + file))
    except IOError:
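
A note on the staging_helper rework above: each distinct PSTAGE_PKGARCH now
gets its own "arch <name> <priority>" line, inserted before the dest line
with the next free priority. A resulting opkg.conf might look like this
(arch strings illustrative):

    arch i686-linux 1
    arch arm-linux-gnueabi 2
    dest root /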
diff --git a/classes/patch.bbclass b/classes/patch.bbclass
index 1af374b39b..58b931f966 100644
--- a/classes/patch.bbclass
+++ b/classes/patch.bbclass
@@ -7,6 +7,7 @@ PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
python patch_do_patch() {
    import oe.patch
+    import oe.unpack

    src_uri = (bb.data.getVar('SRC_URI', d, 1) or '').split()
    if not src_uri:
@@ -31,40 +32,42 @@ python patch_do_patch() {
    path = os.getenv('PATH')
    os.putenv('PATH', bb.data.getVar('PATH', d, 1))

-    patchset = cls(s, d)
-    patchset.Clean()
-    resolver = rcls(patchset)
+    classes = {}
+    src_uri = d.getVar("SRC_URI", True).split()
+    srcurldata = bb.fetch.init(src_uri, d, True)

    workdir = bb.data.getVar('WORKDIR', d, 1)
-    for url in src_uri:
-        (type, host, path, user, pswd, parm) = bb.decodeurl(url)
-        if not "patch" in parm:
-            continue
+    for url in d.getVar("SRC_URI", True).split():
+        urldata = srcurldata[url]

-        bb.fetch.init([url],d)
-        url = bb.encodeurl((type, host, path, user, pswd, []))
-        local = os.path.join('/', bb.fetch.localpath(url, d))
+        local = urldata.localpath
+        if not local:
+            raise bb.build.FuncFailed('Unable to locate local file for %s' % url)

-        # did it need to be unpacked?
-        dots = os.path.basename(local).split(".")
-        if dots[-1] in ['gz', 'bz2', 'Z']:
-            unpacked = os.path.join(bb.data.getVar('WORKDIR', d),'.'.join(dots[0:-1]))
-        else:
-            unpacked = local
-        unpacked = bb.data.expand(unpacked, d)
+        base, ext = os.path.splitext(os.path.basename(local))
+        if ext in ('.gz', '.bz2', '.Z'):
+            local = oe.path.join(workdir, base)
+
+        if not oe.unpack.is_patch(local, urldata.parm):
+            continue
+
+        parm = urldata.parm

-        if "pnum" in parm:
-            pnum = parm["pnum"]
+        if "striplevel" in parm:
+            striplevel = parm["striplevel"]
+        elif "pnum" in parm:
+            bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+            striplevel = parm["pnum"]
        else:
-            pnum = "1"
+            striplevel = '1'

        if "pname" in parm:
            pname = parm["pname"]
        else:
-            pname = os.path.basename(unpacked)
+            pname = os.path.basename(local)

-        if "mindate" in parm or "maxdate" in parm:
+        if "mindate" in parm or "maxdate" in parm:
            pn = bb.data.getVar('PN', d, 1)
            srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
            if not srcdate:
@@ -94,9 +97,24 @@ python patch_do_patch() {
bb.note("Patch '%s' applies to earlier revisions" % pname)
continue
- bb.note("Applying patch '%s' (%s)" % (pname, oe.path.format_display(unpacked, d)))
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if not os.path.isabs(patchdir):
+ patchdir = os.path.join(s, patchdir)
+ else:
+ patchdir = s
+
+ if not patchdir in classes:
+ patchset = cls(patchdir, d)
+ resolver = rcls(patchset)
+ classes[patchdir] = (patchset, resolver)
+ patchset.Clean()
+ else:
+ patchset, resolver = classes[patchdir]
+
+ bb.note("Applying patch '%s' (%s)" % (pname, oe.path.format_display(local, d)))
try:
- patchset.Import({"file":unpacked, "remote":url, "strippath": pnum}, True)
+ patchset.Import({"file":local, "remote":url, "strippath": striplevel}, True)
except Exception:
import sys
raise bb.build.FuncFailed(str(sys.exc_value))
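With this rework a patch's strip level and target directory come from per-URL parameters, and the old 'pnum' parameter is still honoured with a deprecation warning. An illustrative SRC_URI entry (recipe and file names are hypothetical, and this assumes oe.unpack.is_patch() recognises the file as a patch):

    SRC_URI = "http://example.org/foo-1.0.tar.gz \
               file://fix-build.patch;striplevel=2;patchdir=libfoo"

Here patchdir is relative, so it is resolved against ${S}, and the patch is applied with strip level 2.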
diff --git a/classes/pkgconfig.bbclass b/classes/pkgconfig.bbclass
index f3d93716d7..23ab453a53 100644
--- a/classes/pkgconfig.bbclass
+++ b/classes/pkgconfig.bbclass
@@ -1,9 +1,12 @@
DEPENDS_prepend = "pkgconfig-native "
do_install_prepend () {
-
-for i in `find ${S}/ -name "*.pc" -type f` ; do \
- sed -i -e 's:-L${STAGING_LIBDIR}::g' -e 's:-I${STAGING_INCDIR}::g' $i
+ for i in `find ${S}/ -name "*.pc" -type f` ; do \
+ sed -i -e 's:-L${STAGING_LIBDIR}::g' \
+ -e 's:-R${STAGING_LIBDIR}::g' \
+ -e 's:-I${STAGING_INCDIR}::g' \
+ -e 's:-isystem${STAGING_INCDIR}::g' \
+ $i
done
}
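The added expressions also strip the -R and -isystem spellings of the staging paths. For example (values are illustrative), a line in a generated foo.pc such as

    Libs: -L/OE/tmp/sysroots/armv5te/usr/lib -lfoo

is reduced to

    Libs: -lfoo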
diff --git a/classes/python-dir.bbclass b/classes/python-dir.bbclass
index d631a5c3ff..7b0b80973a 100644
--- a/classes/python-dir.bbclass
+++ b/classes/python-dir.bbclass
@@ -4,7 +4,7 @@ def python_dir(d):
for majmin in "2.6 2.5 2.4 2.3".split():
if os.path.exists( "%s/python%s" % ( staging_incdir, majmin ) ): return "python%s" % majmin
if not "python-native" in bb.data.getVar( "DEPENDS", d, 1 ).split():
- raise "No Python in STAGING_INCDIR. Forgot to build python-native ?"
+ raise Exception("No Python in STAGING_INCDIR. Forgot to build python-native?")
return "INVALID"
PYTHON_DIR = "${@python_dir(d)}"
diff --git a/classes/qmake_base.bbclass b/classes/qmake_base.bbclass
index 4fbe21f2e1..577c0fab3b 100644
--- a/classes/qmake_base.bbclass
+++ b/classes/qmake_base.bbclass
@@ -1,6 +1,6 @@
OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
-QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
+QMAKESPEC = "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
# We override this completely to eliminate the -e normally passed in
EXTRA_OEMAKE = ' MAKEFLAGS= '
diff --git a/classes/recipe_sanity.bbclass b/classes/recipe_sanity.bbclass
index daae4aed5b..bb60ffa00e 100644
--- a/classes/recipe_sanity.bbclass
+++ b/classes/recipe_sanity.bbclass
@@ -51,7 +51,7 @@ def can_use_autotools_base(cfgdata, d):
if not bb.data.inherits_class("autotools", d):
return False
- for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "autotools_stage", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
+ for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
if cfg.find(i) != -1:
return False
diff --git a/classes/relocatable.bbclass b/classes/relocatable.bbclass
index eb5b9e62ed..2af3a7a1c4 100644
--- a/classes/relocatable.bbclass
+++ b/classes/relocatable.bbclass
@@ -55,27 +55,28 @@ def process_dir (directory, d):
# If the rpath shares a root with base_prefix determine a new dynamic rpath from the
# base_prefix shared root
if rpath.find(basedir) != -1:
- depth = fpath.partition(basedir)[2].count('/')
- libpath = rpath.partition(basedir)[2].strip()
+ fdir = os.path.dirname(fpath.partition(basedir)[2])
+ ldir = rpath.partition(basedir)[2].strip()
# otherwise (i.e. cross packages) determine a shared root based on the TMPDIR
# NOTE: This will not work reliably for cross packages, particularly in the case
# where your TMPDIR is a short path (i.e. /usr/poky) as chrpath cannot insert an
# rpath longer than that which is already set.
else:
- depth = fpath.rpartition(tmpdir)[2].count('/')
- libpath = rpath.partition(tmpdir)[2].strip()
+ fdir = os.path.dirname(fpath.rpartition(tmpdir)[2])
+ ldir = rpath.partition(tmpdir)[2].strip()
- base = "$ORIGIN"
- while depth > 1:
- base += "/.."
- depth-=1
- new_rpaths.append("%s%s" % (base, libpath))
+ try:
+ new_rpaths.append("$ORIGIN/%s" % oe.path.relative(fdir, ldir))
+ except ValueError:
+ # Some programs link in non-existent RPATH directories.
+ continue
# if we have modified some rpaths call chrpath to update the binary
if len(new_rpaths):
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath,args))
- sub.call([cmd, '-r', args, fpath])
+ oe_system(d, [cmd, '-r', args, fpath], shell=False,
+ stdout=open("/dev/null", "a"))
if perms:
os.chmod(fpath, perms)
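A worked example of the relative-rpath computation above (paths are illustrative):

    # fpath = <base_prefix>/usr/bin/foo  -> fdir = /usr/bin
    # rpath = <base_prefix>/usr/lib      -> ldir = /usr/lib
    # oe.path.relative('/usr/bin', '/usr/lib') -> '../lib'
    # resulting rpath entry: $ORIGIN/../lib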
diff --git a/classes/rootfs_ipk.bbclass b/classes/rootfs_ipk.bbclass
index 9fa1d5ec6c..77e573c260 100644
--- a/classes/rootfs_ipk.bbclass
+++ b/classes/rootfs_ipk.bbclass
@@ -6,6 +6,7 @@
#
do_rootfs[depends] += "opkg-native:do_populate_sysroot"
+do_rootfs[lockfiles] = "${DEPLOY_DIR_IPK}.lock"
IPKG_TMP_DIR = "${IMAGE_ROOTFS}-tmp"
IPKG_ARGS = "-f ${IPKGCONF_TARGET} -o ${IMAGE_ROOTFS} -t ${IPKG_TMP_DIR} ${@base_conditional("PACKAGE_INSTALL_NO_DEPS", "1", "-nodeps", "", d)}"
@@ -40,7 +41,7 @@ fakeroot rootfs_ipk_do_rootfs () {
for i in ${BAD_RECOMMENDATIONS}; do
echo "Package: $i" >> $STATUS
echo "Architecture: ${TARGET_ARCH}" >> $STATUS
- echo "Status: deinstall ok not-installed" >> $STATUS
+ echo "Status: deinstall hold not-installed" >> $STATUS
echo >> $STATUS
done
@@ -58,6 +59,11 @@ fakeroot rootfs_ipk_do_rootfs () {
if [ ! -z "${PACKAGE_INSTALL}" ]; then
opkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL}
fi
+ if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
+ for i in ${PACKAGE_INSTALL_ATTEMPTONLY}; do
+ opkg-cl ${IPKG_ARGS} install $i 2>&1 || true
+ done > ${T}/log.do_rootfs-attemptonly.${PID}
+ fi
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
@@ -92,15 +98,19 @@ fakeroot rootfs_ipk_do_rootfs () {
else
rm -f ${IMAGE_ROOTFS}${libdir}/opkg/lists/*
fi
-
+
+ # Remove lists, but leave SHR's tmp dir if it exists.
+ rm -f ${IMAGE_ROOTFS}/var/lib/opkg/* || true
+
# Keep these lines until package manager selection is implemented
ln -s opkg ${IMAGE_ROOTFS}${sysconfdir}/ipkg
ln -s opkg ${IMAGE_ROOTFS}${libdir}/ipkg
else
rm -rf ${IMAGE_ROOTFS}${libdir}/opkg
rm -rf ${IMAGE_ROOTFS}/usr/lib/opkg
+ rm -rf ${IMAGE_ROOTFS}/var/lib/opkg
fi
-
+
log_check rootfs
rm -rf ${IPKG_TMP_DIR}
}
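The new PACKAGE_INSTALL_ATTEMPTONLY handling installs each listed package individually, tolerating failures and collecting the output in ${T}/log.do_rootfs-attemptonly.${PID}. An image recipe could use it like this (package names are hypothetical):

    PACKAGE_INSTALL_ATTEMPTONLY = "foo-plugin bar-extra-theme"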
diff --git a/classes/sanity.bbclass b/classes/sanity.bbclass
index dfb3010f82..575530aa84 100644
--- a/classes/sanity.bbclass
+++ b/classes/sanity.bbclass
@@ -30,7 +30,9 @@ def check_sanity(e):
try:
from distutils.version import LooseVersion
except ImportError:
- def LooseVersion(v): print "WARNING: sanity.bbclass can't compare versions without python-distutils"; return 1
+ def LooseVersion(v):
+ bb.msg.warn(None, "sanity.bbclass can't compare versions without python-distutils")
+ return 1
import commands
# Check the bitbake version meets minimum requirements
@@ -38,7 +40,6 @@ def check_sanity(e):
if not minversion:
# Hack: BB_MIN_VERSION hasn't been parsed yet so return
# and wait for the next call
- print "Foo %s" % minversion
return
if 0 == os.getuid():
@@ -141,7 +142,7 @@ def check_sanity(e):
if not abi.isdigit():
f = file(abifile, "w")
f.write(current_abi)
- elif abi == "3" and current_abi == "4":
+ elif abi == "3" and current_abi == "4":
import bb
bb.note("Converting staging from layout version 2 to layout version 3")
os.system(bb.data.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots", e.data))
@@ -149,6 +150,8 @@ def check_sanity(e):
os.system(bb.data.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done", e.data))
f = file(abifile, "w")
f.write(current_abi)
+ elif abi == "5" and current_abi != "5":
+ messages = messages + "Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n"
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
messages = messages + "Error, TMPDIR has changed ABI (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)
@@ -186,8 +189,8 @@ def check_sanity(e):
if messages != "":
raise_sanity_error(messages)
-addhandler check_sanity_eventhandler
python check_sanity_eventhandler() {
- if bb.event.getName(e) == "ConfigParsed":
+ if isinstance(e, bb.event.BuildStarted):
check_sanity(e)
}
+addhandler check_sanity_eventhandler
diff --git a/classes/shr-mirrors.bbclass b/classes/shr-mirrors.bbclass
new file mode 100644
index 0000000000..92a41ad680
--- /dev/null
+++ b/classes/shr-mirrors.bbclass
@@ -0,0 +1,13 @@
+PREMIRRORS_append () {
+cvs://.*/.* http://build.shr-project.org/sources/
+svn://.*/.* http://build.shr-project.org/sources/
+git://.*/.* http://build.shr-project.org/sources/
+hg://.*/.* http://build.shr-project.org/sources/
+bzr://.*/.* http://build.shr-project.org/sources/
+}
+
+MIRRORS_append () {
+ftp://.*/.* http://build.shr-project.org/sources/
+https?$://.*/.* http://build.shr-project.org/sources/
+}
+
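These hooks make the fetcher consult the SHR source mirror before upstream (PREMIRRORS) for SCM URLs and after upstream (MIRRORS) for plain downloads, e.g. (mirrored file name is illustrative):

    # git://example.org/foo.git  ->  http://build.shr-project.org/sources/<mirror tarball>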
diff --git a/classes/siteinfo.bbclass b/classes/siteinfo.bbclass
index d7f27054b0..0122bdb394 100644
--- a/classes/siteinfo.bbclass
+++ b/classes/siteinfo.bbclass
@@ -15,121 +15,85 @@
# It is an error for the target not to exist.
# If 'what' doesn't exist then an empty value is returned
#
-def get_siteinfo_list(d):
- target = bb.data.getVar('HOST_ARCH', d, 1) + "-" + bb.data.getVar('HOST_OS', d, 1)
+def siteinfo_data(d):
+ archinfo = {
+ "arm": "endian-little bit-32 arm-common",
+ "armeb": "endian-big bit-32 arm-common",
+ "avr32": "endian-big bit-32 avr32-common",
+ "bfin": "endian-little bit-32 bfin-common",
+ "i386": "endian-little bit-32 ix86-common",
+ "i486": "endian-little bit-32 ix86-common",
+ "i586": "endian-little bit-32 ix86-common",
+ "i686": "endian-little bit-32 ix86-common",
+ "ia64": "endian-little bit-64",
+ "mips": "endian-big bit-32 mips-common",
+ "mipsel": "endian-little bit-32 mips-common",
+ "powerpc": "endian-big bit-32 powerpc-common",
+ "nios2": "endian-little bit-32 nios2-common",
+ "powerpc64": "endian-big bit-64 powerpc-common powerpc64-linux",
+ "ppc": "endian-big bit-32 powerpc-common",
+ "ppc64": "endian-big bit-64 powerpc-common powerpc64-linux",
+ "sh3": "endian-little bit-32 sh-common",
+ "sh4": "endian-little bit-32 sh-common",
+ "sparc": "endian-big bit-32",
+ "viac3": "endian-little bit-32 ix86-common",
+ "x86_64": "endian-little bit-64",
+ }
+ osinfo = {
+ "darwin": "common-darwin",
+ "darwin9": "common-darwin",
+ "linux": "common-linux common-glibc",
+ "linux-gnueabi": "common-linux common-glibc",
+ "linux-gnuspe": "common-linux common-glibc",
+ "linux-uclibc": "common-linux common-uclibc",
+ "linux-uclibceabi": "common-linux common-uclibc",
+ "linux-uclibcspe": "common-linux common-uclibc",
+ "uclinux-uclibc": "common-uclibc",
+ "cygwin": "common-cygwin",
+ "mingw32": "common-mingw",
+ }
+ targetinfo = {
+ "arm-linux-gnueabi": "arm-linux",
+ "arm-linux-uclibceabi": "arm-linux-uclibc",
+ "armeb-linux-gnueabi": "armeb-linux",
+ "armeb-linux-uclibceabi": "armeb-linux-uclibc",
+ "powerpc-linux-gnuspe": "powerpc-linux",
+ "powerpc-linux-uclibcspe": "powerpc-linux-uclibc",
+ }
- targetinfo = {\
- "armeb-linux": "endian-big bit-32 common-linux common-glibc arm-common",\
- "armeb-linux-gnueabi": "endian-big bit-32 common-linux common-glibc arm-common armeb-linux",\
- "armeb-linux-uclibc": "endian-big bit-32 common-linux common-uclibc arm-common",\
- "armeb-linux-uclibceabi": "endian-big bit-32 common-linux common-uclibc arm-common armeb-linux-uclibc",\
- "arm-darwin": "endian-little bit-32 common-darwin",\
- "arm-darwin9": "endian-little bit-32 common-darwin",\
- "arm-linux": "endian-little bit-32 common-linux common-glibc arm-common",\
- "arm-linux-gnueabi": "endian-little bit-32 common-linux common-glibc arm-common arm-linux",\
- "arm-linux-uclibc": "endian-little bit-32 common-linux common-uclibc arm-common",\
- "arm-linux-uclibceabi": "endian-little bit-32 common-linux common-uclibc arm-common arm-linux-uclibc",\
- "avr32-linux-uclibc": "endian-big bit-32 common-linux common-uclibc avr32-common",\
- "bfin-uclinux-uclibc": "endian-little bit-32 common-uclibc bfin-common",\
- "i386-linux": "endian-little bit-32 common-linux common-glibc ix86-common",\
- "i486-linux": "endian-little bit-32 common-linux common-glibc ix86-common",\
- "i586-linux": "endian-little bit-32 common-linux common-glibc ix86-common",\
- "i686-linux": "endian-little bit-32 common-linux common-glibc ix86-common",\
- "i386-linux-uclibc": "endian-little bit-32 common-linux common-uclibc ix86-common",\
- "i486-linux-uclibc": "endian-little bit-32 common-linux common-uclibc ix86-common",\
- "i586-linux-uclibc": "endian-little bit-32 common-linux common-uclibc ix86-common",\
- "i686-linux-uclibc": "endian-little bit-32 common-linux common-uclibc ix86-common",\
- "i386-cygwin": "endian-little bit-32 common-cygwin ix86-common",\
- "i486-cygwin": "endian-little bit-32 common-cygwin ix86-common",\
- "i586-cygwin": "endian-little bit-32 common-cygwin ix86-common",\
- "i686-cygwin": "endian-little bit-32 common-cygwin ix86-common",\
- "i386-mingw32": "endian-little bit-32 common-mingw ix86-common",\
- "i486-mingw32": "endian-little bit-32 common-mingw ix86-common",\
- "i586-mingw32": "endian-little bit-32 common-mingw ix86-common",\
- "i686-mingw32": "endian-little bit-32 common-mingw ix86-common",\
- "ia64-linux": "endian-little bit-64 common-linux common-glibc",\
- "mipsel-linux": "endian-little bit-32 common-linux common-glibc mips-common",\
- "mipsel-linux-uclibc": "endian-little bit-32 common-linux common-uclibc mips-common",\
- "mips-linux": "endian-big bit-32 common-linux common-glibc mips-common",\
- "mips-linux-uclibc": "endian-big bit-32 common-linux common-uclibc mips-common",\
- "powerpc-darwin": "endian-big bit-32 common-darwin",\
- "ppc-linux": "endian-big bit-32 common-linux common-glibc powerpc-common powerpc-linux",\
- "ppc64-linux": "endian-big bit-64 common-linux common-glibc powerpc-common powerpc64-linux",\
- "powerpc-linux": "endian-big bit-32 common-linux common-glibc powerpc-common",\
- "powerpc-linux-gnuspe": "endian-big bit-32 common-linux common-glibc powerpc-common powerpc-linux",\
- "powerpc-linux-uclibc": "endian-big bit-32 common-linux common-uclibc powerpc-common",\
- "powerpc-linux-uclibcspe": "endian-big bit-32 common-linux common-uclibc powerpc-common powerpc-linux-uclibc",\
- "sh3-linux": "endian-little bit-32 common-linux common-glibc sh-common",\
- "sh4-linux": "endian-little bit-32 common-linux common-glibc sh-common",\
- "sh4-linux-uclibc": "endian-little bit-32 common-linux common-uclibc sh-common",\
- "sparc-linux": "endian-big bit-32 common-linux common-glibc",\
- "viac3-linux": "endian-little bit-32 common-linux common-glibc ix86-common",\
- "x86_64-linux": "endian-little bit-64 common-linux common-glibc",\
- "x86_64-linux-uclibc": "endian-little bit-64 common-linux common-uclibc"}
- if target in targetinfo:
- info = targetinfo[target].split()
- info.append(target)
- info.append("common")
- return info
- else:
- bb.error("Information not available for target '%s'" % target)
+ arch = d.getVar("HOST_ARCH", True)
+ os = d.getVar("HOST_OS", True)
+ target = "%s-%s" % (arch, os)
+ sitedata = []
+ if arch in archinfo:
+ sitedata.extend(archinfo[arch].split())
+ if os in osinfo:
+ sitedata.extend(osinfo[os].split())
+ if target in targetinfo:
+ sitedata.extend(targetinfo[target].split())
+ sitedata.append(target)
+ sitedata.append("common")
-#
-# Define which site files to use. We check for several site files and
-# use each one that is found, based on the list returned by get_siteinfo_list()
-#
-# Search for the files in the following directories:
-# 1) ${BBPATH}/site (in reverse) - app specific, then site wide
-# 2) ${FILE_DIRNAME}/site-${PV} - app version specific
-#
-def siteinfo_get_files(d):
- sitefiles = ""
-
- # Determine which site files to look for
- sites = get_siteinfo_list(d)
- sites.append("common");
-
- # Check along bbpath for site files and append in reverse order so
- # the application specific sites files are last and system site
- # files first.
- path_bb = bb.data.getVar('BBPATH', d, 1)
- for p in (path_bb or "").split(':'):
- tmp = ""
- for i in sites:
- fname = os.path.join(p, 'site', i)
- if os.path.exists(fname):
- tmp += fname + " "
- sitefiles = tmp + sitefiles;
+ return sitedata
- # Now check for the applications version specific site files
- path_pkgv = os.path.join(bb.data.getVar('FILE_DIRNAME', d, 1), "site-" + bb.data.getVar('PV', d, 1))
- for i in sites:
- fname = os.path.join(path_pkgv, i)
- if os.path.exists(fname):
- sitefiles += fname + " "
+python () {
+ sitedata = set(siteinfo_data(d))
+ if "endian-little" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "le")
+ elif "endian-big" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "be")
+ else:
+ bb.error("Unable to determine endianness for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
- bb.debug(1, "SITE files " + sitefiles);
- return sitefiles
-
-def siteinfo_get_endianess(d):
- info = get_siteinfo_list(d)
- if 'endian-little' in info:
- return "le"
- elif 'endian-big' in info:
- return "be"
- bb.error("Site info could not determine endianess for target")
-
-def siteinfo_get_bits(d):
- info = get_siteinfo_list(d)
- if 'bit-32' in info:
- return "32"
- elif 'bit-64' in info:
- return "64"
- bb.error("Site info could not determine bit size for target")
-
-#
-# Make some information available via variables
-#
-SITEINFO_ENDIANESS = "${@siteinfo_get_endianess(d)}"
-SITEINFO_BITS = "${@siteinfo_get_bits(d)}"
+ if "bit-32" in sitedata:
+ d.setVar("SITEINFO_BITS", "32")
+ elif "bit-64" in sitedata:
+ d.setVar("SITEINFO_BITS", "64")
+ else:
+ bb.error("Unable to determine bit size for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
+}
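A worked example of siteinfo_data(), with values taken straight from the tables above: for HOST_ARCH = "arm" and HOST_OS = "linux-gnueabi",

    # archinfo["arm"]                 -> endian-little bit-32 arm-common
    # osinfo["linux-gnueabi"]         -> common-linux common-glibc
    # targetinfo["arm-linux-gnueabi"] -> arm-linux
    # plus the target string "arm-linux-gnueabi" and "common"
    # so SITEINFO_ENDIANNESS = "le" and SITEINFO_BITS = "32"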
diff --git a/classes/sourceipk.bbclass b/classes/sourceipk.bbclass
new file mode 100644
index 0000000000..4957e4af4c
--- /dev/null
+++ b/classes/sourceipk.bbclass
@@ -0,0 +1,131 @@
+# sourceipk.bbclass enables the creation of an ipk file that contains the
+# sources used during the build. The sources contained in the ipk are the
+# patched sources before configuration has been done.
+#
+# This class is used to provide an easy method to ship the corresponding
+# sources for a package to end users so that they can install them on their
+# host or target systems.
+#
+# This class uses the following variables to control its operation:
+# - CREATE_SRCIPK = When set to 1 this variable indicates that
+# a source ipk should be generated for the package.
+# - SRCIPK_INSTALL_DIR = This variable indicates the directory to install
+# the sources into.
+# - SRCIPK_PACKAGE_ARCH = This variable allows specific recipes to
+# specify an architecture for the sourcetree
+# package if "all" is not appropriate
+#
+# The default installation directory for the sources is:
+# /usr/src/${PN}-src
+#
+# By setting the SRCIPK_INSTALL_DIR this default can be changed to any
+# location desired. When combined with the opkg -o option this allows the
+# system builder to specify a package install location relative to the
+# install root given to opkg. Each source ipk can have a different directory.
+#
+# Creation of the source ipk can be controlled per package by setting
+# CREATE_SRCIPK = "1" in the package recipe or by setting
+# CREATE_SRCIPK_pn-<package name> = "1" in your local.conf
+#
+#TODO:
+# Need to figure out how to use ipkg-build in this class.
+# I tried adding it as a dependency for the do_create_srcipk
+# task using:
+# do_create_srcipk[depends] += "ipkg-utils-native:do_populate_sysroot"
+# But then there is a circular dependency between sourceipk.bbclass and
+# ipkg-utils-native. Until I can figure out how to resolve this
+# circular dependency I am extracting the needed pieces from ipkg-build
+# into this class and building the source ipk myself.
+
+
+# Default is to not create the source ipk
+CREATE_SRCIPK ?= "0"
+
+# Default installation prefix
+SRCIPK_INSTALL_DIR ?= "/usr/src/${PN}-src"
+
+# Default PACKAGE_ARCH for sources is "all"
+SRCIPK_PACKAGE_ARCH ?= "all"
+
+# Create a README file that describes the contents of the source ipk
+sourceipk_create_readme() {
+ readme="$1/README.${PN}-src"
+ touch $readme
+ echo 'This package contains the patched sources for ${PN} that' >> $readme
+ echo 'were used to generate the ${PN} binary ipk package(s).' >> $readme
+ echo 'This package does not build or generate the binaries' >> $readme
+ echo 'directly. To build the binaries you must either' >> $readme
+ echo 'configure and build the sources yourself or use:' >> $readme
+ echo ' bitbake ${PN}' >> $readme
+ echo '' >> $readme
+ echo 'NOTE: The patches applied to the sources can be found in' >> $readme
+ echo " the \"patches\" directory" >> $readme
+}
+
+# Create the source ipk file. The ipk is manually created here instead
+# of using the normal ipk system because some recipes will overwrite
+# the PACKAGES variable. Thus if this class added a -src package
+# to the list of packages to be created that package would be lost.
+# See the linux kernel recipe for an example of this issue.
+sourceipk_do_create_srcipk() {
+ if [ ${CREATE_SRCIPK} != "0" ]
+ then
+ tmp_dir="${WORKDIR}/sourceipk-tmp"
+ srcipk_dir="${WORKDIR}/sourceipk-data"
+ mkdir -p $tmp_dir/CONTROL
+ mkdir -p $srcipk_dir
+ control_file="$tmp_dir/CONTROL/control"
+
+ # Write the control file
+ echo "Package: ${PN}-src" > $control_file
+ echo "Version: ${PV}-${PR}" >> $control_file
+ echo "Description: Patched sources for ${PN}" >> $control_file
+ echo "Section: ${SECTION}" >> $control_file
+ echo "Priority: Optional" >> $control_file
+ echo "Maintainer: ${MAINTAINER}" >> $control_file
+ echo "License: ${LICENSE}" >> $control_file
+ echo "Architecture: ${SRCIPK_PACKAGE_ARCH}" >> $control_file
+ srcuri="${SRC_URI}"
+ if [ "$srcuri" == "" ]
+ then
+ srcuri="OpenEmbedded"
+ fi
+ echo "Source: $srcuri" >> $control_file
+
+ #Write the control tarball
+ tar -C $tmp_dir/CONTROL --owner=0 --group=0 -czf $srcipk_dir/control.tar.gz .
+
+ # Get rid of temporary control file
+ rm -rf $tmp_dir/CONTROL
+
+ # Copy sources for packaging
+ mkdir -p $tmp_dir/${SRCIPK_INSTALL_DIR}
+ cp -rLf ${S}/* $tmp_dir/${SRCIPK_INSTALL_DIR}/
+ sourceipk_create_readme $tmp_dir/${SRCIPK_INSTALL_DIR}/
+ cp ${FILE} $tmp_dir/${SRCIPK_INSTALL_DIR}/
+
+ #Write the data tarball
+ tar -C $tmp_dir --owner=0 --group=0 -czf $srcipk_dir/data.tar.gz .
+
+ # Create the debian-binary file
+ echo "2.0" > $srcipk_dir/debian-binary
+
+ #Write the ipk file
+ mkdir -p ${DEPLOY_DIR_IPK}/${SRCIPK_PACKAGE_ARCH}
+ pkg_file=${DEPLOY_DIR_IPK}/${SRCIPK_PACKAGE_ARCH}/${PN}-src_${PV}-${PR}_${SRCIPK_PACKAGE_ARCH}.ipk
+ rm -f $pkg_file
+ ( cd $srcipk_dir && ar -crf $pkg_file ./debian-binary ./data.tar.gz ./control.tar.gz )
+
+ # Remove the temporary directory
+ rm -rf $tmp_dir
+ fi
+}
+
+EXPORT_FUNCTIONS do_create_srcipk
+
+do_create_srcipk[deptask] = "do_patch"
+
+addtask create_srcipk after do_patch before do_configure
+
+#Add source packages to list of packages OE knows about
+PACKAGES_DYNAMIC += "${PN}-src"
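Per the header comments, enabling source ipks for a single recipe needs only a line or two in local.conf (the recipe name is hypothetical, and this assumes the class is inherited, e.g. via INHERIT):

    CREATE_SRCIPK_pn-busybox = "1"
    SRCIPK_INSTALL_DIR_pn-busybox = "/usr/src/busybox-src"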
diff --git a/classes/srctree.bbclass b/classes/srctree.bbclass
index 7232c26b12..1a88613a0b 100644
--- a/classes/srctree.bbclass
+++ b/classes/srctree.bbclass
@@ -50,7 +50,7 @@ def merge_tasks(d):
__gather_taskdeps(task, items)
return items
- newtask = "do_populate_sysroot"
+ newtask = "do_populate_sysroot_post"
mergedtasks = gather_taskdeps(newtask)
mergedtasks.pop()
deltasks = gather_taskdeps("do_patch")
@@ -83,17 +83,22 @@ def merge_tasks(d):
d.setVarFlag(task, "deps", deps)
# Pull cross recipe task deps over
- depends = (d.getVarFlag(task, "depends") or ""
- for task in mergedtasks[:-1]
- if not task in deltasks)
- d.setVarFlag("do_populate_sysroot", "depends", " ".join(depends))
+ depends = []
+ deptask = []
+ for task in mergedtasks[:-1]:
+ if not task in deltasks:
+ depends.append(d.getVarFlag(task, "depends") or "")
+ deptask.append(d.getVarFlag(task, "deptask") or "")
+
+ d.setVarFlag("do_populate_sysroot_post", "depends", " ".join(depends))
+ d.setVarFlag("do_populate_sysroot_post", "deptask", " ".join(deptask))
python () {
merge_tasks(d)
}
-# Manually run do_install & all of its deps, then do_stage
-python do_populate_sysroot () {
+# Manually run do_install & all of its deps
+python do_populate_sysroot_post () {
from os.path import exists
from bb.build import exec_task, exec_func
from bb import note
@@ -105,11 +110,11 @@ python do_populate_sysroot () {
if not dep in seen:
rec_exec_task(dep, seen)
seen.add(task)
- #if not exists("%s.%s" % (stamp, task)):
- note("%s: executing task %s" % (d.getVar("PF", True), task))
- exec_task(task, d)
+ if not exists("%s.%s" % (stamp, task)):
+ note("%s: executing task %s" % (d.getVar("PF", True), task))
+ exec_func(task, d)
- rec_exec_task("do_install", set())
- exec_func("do_stage", d)
+ rec_exec_task("do_populate_sysroot", set())
}
-do_populate_sysroot[lockfiles] += "${S}/.lock"
+addtask populate_sysroot_post after do_populate_sysroot
+do_populate_sysroot_post[lockfiles] += "${S}/.lock"
diff --git a/classes/staging.bbclass b/classes/staging.bbclass
index e80644961c..b871226811 100644
--- a/classes/staging.bbclass
+++ b/classes/staging.bbclass
@@ -168,7 +168,8 @@ python do_populate_sysroot () {
#os.system('cp -pPR %s/* %s/' % (dest, sysrootdest))
for f in (bb.data.getVar('SYSROOT_PREPROCESS_FUNCS', d, True) or '').split():
bb.build.exec_func(f, d)
- bb.build.exec_func("packagedstaging_fastpath", d)
+ if pstageactive:
+ bb.build.exec_func("packagedstaging_fastpath", d)
lock = bb.utils.lockfile(lockfile)
os.system(bb.data.expand('cp -pPR ${SYSROOT_DESTDIR}${TMPDIR}/* ${TMPDIR}/', d))
diff --git a/classes/testlab.bbclass b/classes/testlab.bbclass
index e6c9c8e284..b8ba9fec56 100644
--- a/classes/testlab.bbclass
+++ b/classes/testlab.bbclass
@@ -36,14 +36,14 @@ if [ -e ${IMAGE_ROOTFS}/etc/opkg ] && [ "${ONLINE_PACKAGE_MANAGEMENT}" = "full"
echo -e "digraph depends {\n node [shape=plaintext]" > ${TESTLAB_DIR}/depends.dot
for pkg in $(opkg-cl ${IPKG_ARGS} list_installed | awk '{print $1}') ; do
- opkg-cl ${IPKG_ARGS} info $pkg | awk '/Package/ {printf $2"_"} /Version/ {printf $2"_"} /Archi/ {print $2".ipk"}' >> ${TESTLAB_DIR}/installed-packages.txt
+ opkg-cl ${IPKG_ARGS} info $pkg | grep -B 7 -A 7 "^Status.* \(\(installed\)\|\(unpacked\)\)" | awk '/^Package/ {printf $2"_"} /^Version/ {printf $2"_"} /^Archi/ {print $2".ipk"}' >> ${TESTLAB_DIR}/installed-packages.txt
- for depends in $(opkg-cl ${IPKG_ARGS} info $pkg | grep Depends) ; do
- echo "$pkg OPP $depends;" | grep -v "(" | grep -v ")" | grep -v Depends | sed -e 's:,::g' -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' |sed 's:OPP:->:g' >> ${TESTLAB_DIR}/depends.dot
+ for depends in $(opkg-cl ${IPKG_ARGS} info $pkg | grep ^Depends) ; do
+ echo "$pkg OPP $depends;" | grep -v "(" | grep -v ")" | grep -v "$pkg OPP Depends" | sed -e 's:,::g' -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' |sed 's:OPP:->:g' >> ${TESTLAB_DIR}/depends.dot
done
- for recommends in $(opkg-cl ${IPKG_ARGS} info $pkg | grep Recom) ; do
- echo "$pkg OPP $recommends [style=dotted];" | grep -v "(" | grep -v ")" | grep -v Recom | sed -e 's:,::g' -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' |sed 's:OPP:->:g' >> ${TESTLAB_DIR}/depends.dot
+ for recommends in $(opkg-cl ${IPKG_ARGS} info $pkg | grep ^Recom) ; do
+ echo "$pkg OPP $recommends [style=dotted];" | grep -v "(" | grep -v ")" | grep -v "$pkg OPP Recom" | sed -e 's:,::g' -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' |sed 's:OPP:->:g' >> ${TESTLAB_DIR}/depends.dot
done
done
diff --git a/classes/utils.bbclass b/classes/utils.bbclass
index 4e352e3b9c..0a7a045cc4 100644
--- a/classes/utils.bbclass
+++ b/classes/utils.bbclass
@@ -38,6 +38,24 @@ def oe_filter(f, str, d):
def oe_filter_out(f, str, d):
return oe.utils.str_filter_out(f, str, d)
+def machine_paths(d):
+ """List any existing machine specific filespath directories"""
+ machine = d.getVar("MACHINE", True)
+ filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
+ for basepath in d.getVar("FILESPATHBASE", True).split(":"):
+ for pkgpath in filespathpkg:
+ machinepath = os.path.join(basepath, pkgpath, machine)
+ if os.path.isdir(machinepath):
+ yield machinepath
+
+def is_machine_specific(d):
+ """Determine whether the current recipe is machine specific"""
+ machinepaths = set(machine_paths(d))
+ urldatadict = bb.fetch.init(d.getVar("SRC_URI", True).split(), d, True)
+ for urldata in (urldata for urldata in urldatadict.itervalues()
+ if urldata.type == "file"):
+ if any(urldata.path.startswith(mp + "/") for mp in machinepaths):
+ return True
def subprocess_setup():
import signal
@@ -45,6 +63,24 @@ def subprocess_setup():
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+def oe_run(d, cmd, **kwargs):
+ """Convenience function to run a command and return its output, raising an
+ exception when the command fails"""
+ from subprocess import PIPE
+
+ options = {
+ "stdout": PIPE,
+ "stderr": PIPE,
+ "shell": True,
+ }
+ options.update(kwargs)
+ pipe = oe_popen(d, cmd, **options)
+ stdout, stderr = pipe.communicate()
+ if pipe.returncode != 0:
+ raise RuntimeError("Execution of '%s' failed with '%s':\n%s" %
+ (cmd, pipe.returncode, stderr))
+ return stdout
+
def oe_popen(d, cmd, **kwargs):
""" Convenience function to call out processes with our exported
variables in the environment.
@@ -61,13 +97,16 @@ def oe_popen(d, cmd, **kwargs):
d.setVar("__oe_popen_env", env)
kwargs["env"] = env
+ kwargs["close_fds"] = True
kwargs["preexec_fn"] = subprocess_setup
return Popen(cmd, **kwargs)
-def oe_system(d, cmd):
+def oe_system(d, cmd, **kwargs):
""" Popen based version of os.system. """
- return oe_popen(d, cmd, shell=True).wait()
+ if not "shell" in kwargs:
+ kwargs["shell"] = True
+ return oe_popen(d, cmd, **kwargs).wait()
# for MD5/SHA handling
def base_chk_load_parser(config_paths):
@@ -88,7 +127,9 @@ def setup_checksum_deps(d):
(depends, "shasum-native:do_populate_sysroot"))
def base_chk_file_checksum(localpath, src_uri, expected_md5sum, expected_sha256sum, data):
- strict_checking = bb.data.getVar("OE_STRICT_CHECKSUMS", data, True)
+ strict_checking = True
+ if bb.data.getVar("OE_STRICT_CHECKSUMS", data, True) != "1":
+ strict_checking = False
if not os.path.exists(localpath):
localpath = base_path_out(localpath, data)
bb.note("The localpath does not exist '%s'" % localpath)
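The new oe_run() helper runs a command through oe_popen(), returns its captured stdout, and raises RuntimeError on a non-zero exit. A sketch of typical use from python task code (the command shown is illustrative):

    # captures stdout; a failing command raises RuntimeError carrying stderr
    version = oe_run(d, "chrpath --version")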
diff --git a/classes/xfce46.bbclass b/classes/xfce46.bbclass
index 7e3e41877b..c24dfa4d8c 100644
--- a/classes/xfce46.bbclass
+++ b/classes/xfce46.bbclass
@@ -11,7 +11,7 @@ SECTION ?= "x11/xfce"
XFCE_VERSION = ${PV}
-SRC_URI = "http://mocha.xfce.org/archive/src/xfce/${PN}/${@'${PV}'[0:3]}/${PN}-${PV}.tar.bz2;name=archive"
+SRC_URI = "http://mocha.xfce.org/archive/src/xfce/${PN}/${@'${PV}'[0:3]}/${PN}-${PV}.tar.bz2"
inherit autotools gtk-icon-cache pkgconfig
diff --git a/classes/xilinx-bsp.bbclass b/classes/xilinx-bsp.bbclass
index d32c60ec73..eb47634da2 100644
--- a/classes/xilinx-bsp.bbclass
+++ b/classes/xilinx-bsp.bbclass
@@ -1,7 +1,8 @@
# Copyright (C) 2007, Stelios Koroneos - Digital OPSiS, All Rights Reserved
+# Copyright (C) 2010, Adrian Alonso <aalonso00@gmail.com>
# Released under the MIT license (see packages/COPYING)
#
-#This class handles all the intricasies of getting the required files from the
+#This class handles all the intricacies of getting the required files from the
#ISE/EDK/project to the kernel and prepare the kernel for compilation.
#The Xilinx EDK supports 2 different architectures : PowerPC (ppc 405,440) and Microblaze
#Only the PowerPC BSP has been tested so far
@@ -10,46 +11,108 @@
#XILINX_BSP_PATH should have the complete path to your project dir
#XILINX_BOARD should have the board type i.e ML403
#
-#Currently supported boards
-#Xilinx ML403
+#Currently tested on
+#Xilinx ML405
#Xilinx ML507
#More to come soon ;)
-do_configure_prepend() {
+def map_target(a, d):
+ import re
+ board = bb.data.getVar('XILINX_BOARD', d, 1)
+ cpu = bb.data.getVar('TARGET_CPU', d, 1)
+
+ if re.match('powerpc', a):
+ return 'ppc' + cpu + '-' + board
+ else:
+ return 'system'
+
+def uboot_machine(a, d):
+ import re
+
+ board = bb.data.getVar('XILINX_BOARD', d, 1)
+ if board in ['ml300', 'ml401', 'ml403', 'ml405', 'ml507', 'ml510']:
+ if re.match('powerpc', a):
+ if board == 'ml403':
+ return 'ml401_config'
+ elif board == 'ml510':
+ return 'ml507_config'
+ else:
+ return board + '_config'
+ else:
+ return 'microblaze-generic_config'
+
+def uboot_target(a, d):
+ import re
+ board = bb.data.getVar('XILINX_BOARD', d, 1)
+ target = bb.data.getVar('TARGET_CPU', d, 1) + '-generic'
+ if board in ['ml300', 'ml401', 'ml403', 'ml405', 'ml507', 'ml510']:
+ if re.match('powerpc', a):
+ if board == 'ml403':
+ return 'ml401'
+ elif board == 'ml510':
+ return 'ml507'
+ else:
+ return board
+ else:
+ return target
+
+do_configure_prepend() {
#first check that the XILINX_BSP_PATH and XILINX_BOARD have been defined in local.conf
#now depending on the board type and arch do what is necessary
-
if [ -n "${XILINX_BSP_PATH}" ]; then
- case "${XILINX_BOARD}" in
- ml403 | ML403)
- oenote "ML403 board setup"
- cp -pPR ${XILINX_BSP_PATH}/ppc405_0/libsrc/linux_2_6_v1_00_a/linux/arch/ppc/platforms/4xx/xparameters/xparameters_ml40x.h \
- ${S}/arch/ppc/platforms/4xx/xparameters/xparameters_ml403.h
- ;;
- ml507 | ML507)
- oenote "Xilinx ML507 board setup"
+ if [ -n "${XILINX_BOARD}" ]; then
+ if [ -d "${S}/arch/${TARGET_ARCH}/boot" ]; then
dts=`find "${XILINX_BSP_PATH}" -name *.dts -print`
- if [ -n "$dts" ]; then
- oenote "Replacing device tree with ${dts}"
- cp -pP ${dts} ${S}/arch/powerpc/boot/dts/virtex440-ml507.dts
+ if [ -e "$dts" ]; then
+ oenote "Replacing device tree to match hardware model"
+ if [ "${TARGET_ARCH}" == "powerpc" ]; then
+ cp -pP ${dts} ${S}/arch/powerpc/boot/dts/virtex${TARGET_BOARD}.dts
+ else
+ cp -pP ${dts} ${S}/arch/microblaze/platform/generic/${TARGET_BOARD}.dts
+ fi
+ else
+ oefatal "No device tree found, missing hardware ref design?"
+ exit 1
+ fi
+ elif [ -d "${S}/board/xilinx" ]; then
+ oenote "Replacing xparameters header to match hardware model"
+ if [ "${TARGET_ARCH}" == "powerpc" ]; then
+ xparam="${XILINX_BSP_PATH}/ppc${TARGET_CPU}_0/include/xparameters.h"
+ cpu="PPC`echo ${TARGET_CPU} | tr '[:lower:]' '[:upper:]'`"
else
- oenote "Device tree not found in project dir"
+ xparam="${XILINX_BSP_PATH}/${TARGET_CPU}_0/include/xparameters.h"
+ cpu=`echo ${TARGET_CPU} | tr '[:lower:]' '[:upper:]'`
fi
- ;;
- *)
- oefatal "! Unknow Xilinx board ! Exit ..."
- exit 1
- ;;
- esac
+ if [ -e "$xparam" ]; then
+ cp ${xparam} ${S}/board/xilinx/${UBOOT_TARGET}
+ echo "/*** Cannonical definitions ***/
+#define XPAR_PLB_CLOCK_FREQ_HZ XPAR_PROC_BUS_0_FREQ_HZ
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_${cpu}_CORE_CLOCK_FREQ_HZ
+#ifndef XPAR_DDR2_SDRAM_MEM_BASEADDR
+# define XPAR_DDR2_SDRAM_MEM_BASEADDR XPAR_DDR_SDRAM_MPMC_BASEADDR
+#endif
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0" >> ${S}/board/xilinx/${UBOOT_TARGET}/xparameters.h
+ else
+ oefatal "No xparameters header file found, missing hardware ref design?"
+ exit 1
+ fi
+ fi
+ else
+ oefatal "XILINX_BOARD not defined ! Exit"
+ exit 1
+ fi
else
oefatal "XILINX_BSP_PATH not defined ! Exit"
exit 1
fi
-
}
-
-
-
-
+do_deploy_prepend() {
+# Install u-boot elf image
+if [ -d "${XILINX_BSP_PATH}" ]; then
+ if [ -e "${S}/u-boot" ]; then
+ install ${S}/u-boot ${XILINX_BSP_PATH}
+ fi
+fi
+}