author    Richard Purdie <rpurdie@rpsys.net>  2006-09-28 23:03:39 +0000
committer Richard Purdie <rpurdie@rpsys.net>  2006-09-28 23:03:39 +0000
commit    51621fdd009d6fa24b0355dfa301bc670595a6dc (patch)
tree      9565e722ec51aec533433b5427dddee7dd6674a5
parent    c76a46c93048f5c097d5c83aa007383d43eac56a (diff)
download  openembedded-51621fdd009d6fa24b0355dfa301bc670595a6dc.tar.gz
Sync minor classes with .dev
 classes/binconfig.bbclass    |  11
 classes/cpan.bbclass         |  11
 classes/debian.bbclass       |   4
 classes/efl.bbclass          |   2
 classes/gconf.bbclass        |   2
 classes/gpe.bbclass          |   1
 classes/icecc.bbclass        | 120
 classes/image_ipk.bbclass    |  29
 classes/insane.bbclass       | 100
 classes/kernel-arch.bbclass  |   1
 classes/kernel.bbclass       |  43
 classes/module.bbclass       |   2
 classes/opie.bbclass         |   4
 classes/patch.bbclass        | 490
 classes/pkgconfig.bbclass    |   2
 classes/rm_work.bbclass      |   2
 classes/rootfs_ipk.bbclass   |  14
 classes/sanity.bbclass       |  24
 classes/sdl.bbclass          |   2
 classes/tinderclient.bbclass | 107
 20 files changed, 850 insertions(+), 121 deletions(-)
diff --git a/classes/binconfig.bbclass b/classes/binconfig.bbclass
index bf15ebcdf9..34021845ee 100644
--- a/classes/binconfig.bbclass
+++ b/classes/binconfig.bbclass
@@ -5,11 +5,12 @@ def get_binconfig_mangle(d):
import bb.data
s = "-e ''"
if not bb.data.inherits_class('native', d):
- s += " -e 's:=${libdir}:=OELIBDIR:;'"
- s += " -e 's:=${includedir}:=OEINCDIR:;'"
- s += " -e 's:=${datadir}:=OEDATADIR:'"
- s += " -e 's:=${prefix}:=OEPREFIX:'"
- s += " -e 's:=${exec_prefix}:=OEEXECPREFIX:'"
+ optional_quote = r"\(\"\?\)"
+ s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
+ s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
+ s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
+ s += " -e 's:=%s${prefix}:=\\1OEPREFIX:'" % optional_quote
+ s += " -e 's:=%s${exec_prefix}:=\\1OEEXECPREFIX:'" % optional_quote
s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
diff --git a/classes/cpan.bbclass b/classes/cpan.bbclass
index 853abfd1b3..c48bd1a980 100644
--- a/classes/cpan.bbclass
+++ b/classes/cpan.bbclass
@@ -1,10 +1,17 @@
+#
+# This is for perl modules that use the old Makefile.PL build system
+#
FILES_${PN} += '${libdir}/perl5'
+EXTRA_CPANFLAGS = ""
+
+DEPENDS += "perl-native"
+RDEPENDS += "perl"
cpan_do_configure () {
- perl Makefile.PL
+ perl Makefile.PL ${EXTRA_CPANFLAGS}
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
. ${STAGING_DIR}/${TARGET_SYS}/perl/config.sh
- sed -e "s:\(SITELIBEXP = \).*:\1${sitelibexp}:; s:\(SITEARCHEXP = \).*:\1${sitearchexp}:; s:\(INSTALLVENDORLIB = \).*:\1${D}${libdir}/perl5:; s:\(INSTALLVENDORARCH = \).*:\1${D}${libdir}/perl5:" < Makefile > Makefile.new
+ sed -e "s:\(SITELIBEXP = \).*:\1${sitelibexp}:; s:\(SITEARCHEXP = \).*:\1${sitearchexp}:; s:\(INSTALLVENDORLIB = \).*:\1${D}${libdir}/perl5/site_perl/${version}:; s:\(INSTALLVENDORARCH = \).*:\1${D}${libdir}/perl5/site_perl/${version}:" < Makefile > Makefile.new
mv Makefile.new Makefile
fi
}
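(cpan.bbclass) The sed invocation keeps each "VAR = " prefix via a capture group and swaps in the cross-compile value. A sketch of the same rewrite in Python, with made-up values standing in for what the class reads from perl's config.sh:

    import re

    # Hypothetical values; the class gets sitelibexp etc. from config.sh.
    subs = {
        "SITELIBEXP": "/usr/lib/perl5/site_perl",
        "INSTALLVENDORLIB": "/image/usr/lib/perl5/site_perl/5.8.7",
    }

    def mangle(makefile_text):
        # Keep the "VAR = " prefix (sed's \(...\) plus \1), replace the rest.
        for var, value in subs.items():
            makefile_text = re.sub(r"(%s = ).*" % var, r"\g<1>" + value, makefile_text)
        return makefile_text

    print(mangle("SITELIBEXP = /host/perl\nINSTALLVENDORLIB = /host/vendor"))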
diff --git a/classes/debian.bbclass b/classes/debian.bbclass
index 5688dad93b..698d917b51 100644
--- a/classes/debian.bbclass
+++ b/classes/debian.bbclass
@@ -6,6 +6,10 @@ STAGING_PKGMAPS_DIR = "${STAGING_DIR}/pkgmaps/debian"
# depends are correct
BUILD_ALL_DEPS = "1"
+# Better expressed as: ensure all RDEPENDS are packaged before we package
+# This means we can't have circular RDEPENDS/RRECOMMENDS
+do_package[rdeptask] = "do_package"
+
python debian_package_name_hook () {
import glob, copy, stat, errno, re
diff --git a/classes/efl.bbclass b/classes/efl.bbclass
index 9c490284c2..c258758d30 100644
--- a/classes/efl.bbclass
+++ b/classes/efl.bbclass
@@ -44,6 +44,6 @@ do_stage_append () {
}
PACKAGES = "${PN} ${PN}-themes ${PN}-dev ${PN}-examples"
-FILES_${PN}-dev = "${bindir}/${PN}-config ${libdir}/pkgconfig ${libdir}/lib*.?a ${libdir}/lib*.a"
+FILES_${PN}-dev = "${bindir}/${PN}-config ${libdir}/pkgconfig ${libdir}/lib*.?a ${libdir}/lib*.a ${includedir}"
FILES_${PN}-examples = "${bindir} ${datadir}"
diff --git a/classes/gconf.bbclass b/classes/gconf.bbclass
index b0c5723873..686f8e6596 100644
--- a/classes/gconf.bbclass
+++ b/classes/gconf.bbclass
@@ -1,3 +1,5 @@
+DEPENDS += "gconf"
+
gconf_postinst() {
if [ "$1" = configure ]; then
if [ "x$D" != "x" ]; then
diff --git a/classes/gpe.bbclass b/classes/gpe.bbclass
index 861ec416a0..c9268dea9c 100644
--- a/classes/gpe.bbclass
+++ b/classes/gpe.bbclass
@@ -2,6 +2,7 @@ DEPENDS_prepend = "coreutils-native virtual/libintl intltool-native "
GPE_TARBALL_SUFFIX ?= "gz"
SRC_URI = "${GPE_MIRROR}/${PN}-${PV}.tar.${GPE_TARBALL_SUFFIX}"
FILES_${PN} += "${datadir}/gpe ${datadir}/application-registry"
+SECTION ?= "gpe"
MAINTAINER ?= "GPE Team <gpe@handhelds.org>"
inherit gettext
diff --git a/classes/icecc.bbclass b/classes/icecc.bbclass
index 7dfcfc29a4..2f34d408d2 100644
--- a/classes/icecc.bbclass
+++ b/classes/icecc.bbclass
@@ -1,9 +1,17 @@
# IceCream distributed compiling support
-#
+#
# We need to create a tar.bz2 of our toolchain and set
# ICECC_VERSION, ICECC_CXX and ICEC_CC
#
+def icc_determine_gcc_version(gcc):
+ """
+ Hack to determine the version of GCC
+
+ 'i686-apple-darwin8-gcc-4.0.1 (GCC) 4.0.1 (Apple Computer, Inc. build 5363)'
+ """
+ return os.popen("%s --version" % gcc ).readline().split()[2]
+
def create_env(bb,d):
"""
Create a tar.bz of the current toolchain
@@ -13,7 +21,7 @@ def create_env(bb,d):
# host prefix is empty (let us duplicate the query for ease)
prefix = bb.data.expand('${HOST_PREFIX}', d)
if len(prefix) == 0:
- return ""
+ return ""
import tarfile
import socket
@@ -23,51 +31,66 @@ def create_env(bb,d):
prefix = bb.data.expand('${HOST_PREFIX}' , d)
distro = bb.data.expand('${DISTRO}', d)
target_sys = bb.data.expand('${TARGET_SYS}', d)
- #float = bb.data.getVar('${TARGET_FPU}', d)
- float = "anyfloat"
+ float = bb.data.getVar('${TARGET_FPU}', d) or "hard"
name = socket.gethostname()
+ # Stupid check to determine if we have built a libc and a cross
+ # compiler.
try:
- os.stat(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2')
- os.stat(ice_dir + '/' + target_sys + '/bin/g++')
+ os.stat(os.path.join(ice_dir, target_sys, 'lib', 'ld-linux.so.2'))
+ os.stat(os.path.join(ice_dir, target_sys, 'bin', 'g++'))
except:
- return ""
+ return ""
- VERSION = '3.4.3'
+ VERSION = icc_determine_gcc_version( os.path.join(ice_dir,target_sys,"bin","g++") )
cross_name = prefix + distro + target_sys + float +VERSION+ name
- tar_file = ice_dir + '/ice/' + cross_name + '.tar.bz2'
+ tar_file = os.path.join(ice_dir, 'ice', cross_name + '.tar.bz2')
try:
os.stat(tar_file)
return tar_file
except:
- try:
- os.makedirs(ice_dir+'/ice')
- except:
- pass
+ try:
+ os.makedirs(os.path.join(ice_dir,'ice'))
+ except:
+ pass
# FIXME find out the version of the compiler
+ # Consider using -print-prog-name={cc1,cc1plus}
+ # and -print-file-name=specs
+
+ # We will use the GCC to tell us which tools to use
+ # What we need is:
+ # -gcc
+ # -g++
+ # -as
+ # -cc1
+ # -cc1plus
+ # and we add them to /usr/bin
+
tar = tarfile.open(tar_file, 'w:bz2')
- tar.add(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2',
- target_sys + 'cross/lib/ld-linux.so.2')
- tar.add(ice_dir + '/' + target_sys + '/lib/ld-linux.so.2',
- target_sys + 'cross/lib/ld-2.3.3.so')
- tar.add(ice_dir + '/' + target_sys + '/lib/libc-2.3.3.so',
- target_sys + 'cross/lib/libc-2.3.3.so')
- tar.add(ice_dir + '/' + target_sys + '/lib/libc.so.6',
- target_sys + 'cross/lib/libc.so.6')
- tar.add(ice_dir + '/' + target_sys + '/bin/gcc',
- target_sys + 'cross/usr/bin/gcc')
- tar.add(ice_dir + '/' + target_sys + '/bin/g++',
- target_sys + 'cross/usr/bin/g++')
- tar.add(ice_dir + '/' + target_sys + '/bin/as',
- target_sys + 'cross/usr/bin/as')
- tar.add(ice_dir + '/lib/gcc/' + target_sys +'/'+ VERSION + '/specs',
- target_sys+'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/specs')
- tar.add(ice_dir + '/libexec/gcc/'+target_sys+'/' + VERSION + '/cc1',
- target_sys + 'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/cc1')
- tar.add(ice_dir + '/libexec/gcc/arm-linux/' + VERSION + '/cc1plus',
- target_sys+'cross/usr/lib/gcc/'+target_sys+'/'+VERSION+'/lib/cc1plus')
+
+ # Now add the required files
+ tar.add(os.path.join(ice_dir,target_sys,'bin','gcc'),
+ os.path.join("usr","bin","gcc") )
+ tar.add(os.path.join(ice_dir,target_sys,'bin','g++'),
+ os.path.join("usr","bin","g++") )
+ tar.add(os.path.join(ice_dir,target_sys,'bin','as'),
+ os.path.join("usr","bin","as") )
+
+ # Now let us find cc1 and cc1plus
+ cc1 = os.popen("%s -print-prog-name=cc1" % data.getVar('CC', d, True)).read()[:-1]
+ cc1plus = os.popen("%s -print-prog-name=cc1plus" % data.getVar('CC', d, True)).read()[:-1]
+ spec = os.popen("%s -print-file-name=specs" % data.getVar('CC', d, True)).read()[:-1]
+
+ # CC1 and CC1PLUS should be there...
+ tar.add(cc1, os.path.join('usr', 'bin', 'cc1'))
+ tar.add(cc1plus, os.path.join('usr', 'bin', 'cc1plus'))
+
+ # spec - if it exists
+ if os.path.exists(spec):
+ tar.add(spec)
+
tar.close()
return tar_file
@@ -78,7 +101,7 @@ def create_path(compilers, type, bb, d):
"""
import os
- staging = bb.data.expand('${STAGING_DIR}', d) + "/ice/" + type
+ staging = os.path.join(bb.data.expand('${STAGING_DIR}', d), "ice", type)
icecc = bb.data.getVar('ICECC_PATH', d)
# Create the dir if necessary
@@ -89,7 +112,7 @@ def create_path(compilers, type, bb, d):
for compiler in compilers:
- gcc_path = staging + "/" + compiler
+ gcc_path = os.path.join(staging, compiler)
try:
os.stat(gcc_path)
except:
@@ -102,15 +125,14 @@ def use_icc_version(bb,d):
# Constin native native
prefix = bb.data.expand('${HOST_PREFIX}', d)
if len(prefix) == 0:
- return "no"
-
-
- native = bb.data.expand('${PN}', d)
- blacklist = [ "-cross", "-native" ]
+ return "no"
+
+
+ blacklist = [ "cross", "native" ]
for black in blacklist:
- if black in native:
- return "no"
+ if bb.data.inherits_class(black, d):
+ return "no"
return "yes"
@@ -118,13 +140,13 @@ def icc_path(bb,d,compile):
native = bb.data.expand('${PN}', d)
blacklist = [ "ulibc", "glibc", "ncurses" ]
for black in blacklist:
- if black in native:
- return ""
+ if black in native:
+ return ""
- if "-native" in native:
- compile = False
- if "-cross" in native:
- compile = False
+ blacklist = [ "cross", "native" ]
+ for black in blacklist:
+ if bb.data.inherits_class(black, d):
+ compile = False
prefix = bb.data.expand('${HOST_PREFIX}', d)
if compile and len(prefix) != 0:
@@ -151,6 +173,6 @@ do_compile_prepend() {
export ICECC_CXX="${HOST_PREFIX}g++"
if [ "${@use_icc_version(bb,d)}" = "yes" ]; then
- export ICECC_VERSION="${@icc_version(bb,d)}"
+ export ICECC_VERSION="${@icc_version(bb,d)}"
fi
}
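(icecc.bbclass) icc_determine_gcc_version() leans on the layout of the gcc --version banner: field index 2 of the first line is the version. A stand-alone sketch of the same probe (assuming a stock "(GCC) x.y.z" banner; a vendor that rewords it would break the field index):

    import subprocess

    def gcc_banner_version(gcc="gcc"):
        # 'i686-apple-darwin8-gcc-4.0.1 (GCC) 4.0.1 (...)' -> '4.0.1'
        line = subprocess.check_output([gcc, "--version"], text=True).splitlines()[0]
        return line.split()[2]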
diff --git a/classes/image_ipk.bbclass b/classes/image_ipk.bbclass
index c2f1c8d682..83e9acf315 100644
--- a/classes/image_ipk.bbclass
+++ b/classes/image_ipk.bbclass
@@ -1,7 +1,8 @@
inherit rootfs_ipk
-# We need to follow RDEPENDS and RRECOMMENDS for images
+# We need to recursively follow RDEPENDS and RRECOMMENDS for images
BUILD_ALL_DEPS = "1"
+do_rootfs[recrdeptask] = "do_package"
# Images are generally built explicitly, do not need to be part of world.
EXCLUDE_FROM_WORLD = "1"
@@ -22,7 +23,27 @@ def get_image_deps(d):
DEPENDS += "${@get_image_deps(d)}"
-IMAGE_DEVICE_TABLE ?= "${@bb.which(bb.data.getVar('BBPATH', d, 1), 'files/device_table-minimal.txt')}"
+#
+# Get a list of files containing device tables to create.
+# * IMAGE_DEVICE_TABLE is the old name for an absolute path to a device table file
+# * IMAGE_DEVICE_TABLES is the new name for a file, or list of files, searched
+#   for in the BBPATH
+# If neither is specified then the default name of files/device_table-minimal.txt
+# is searched for in the BBPATH (same as the old version).
+#
+def get_devtable_list(d):
+ import bb
+ devtable = bb.data.getVar('IMAGE_DEVICE_TABLE', d, 1)
+ if devtable != None:
+ return devtable
+ str = ""
+ devtables = bb.data.getVar('IMAGE_DEVICE_TABLES', d, 1)
+ if devtables == None:
+ devtables = 'files/device_table-minimal.txt'
+ for devtable in devtables.split():
+ str += " %s" % bb.which(bb.data.getVar('BBPATH', d, 1), devtable)
+ return str
+
IMAGE_POSTPROCESS_COMMAND ?= ""
# Must call real_do_rootfs() from inside here, rather than as a separate
@@ -33,7 +54,9 @@ fakeroot do_rootfs () {
if [ "${USE_DEVFS}" != "1" ]; then
mkdir -p ${IMAGE_ROOTFS}/dev
- makedevs -r ${IMAGE_ROOTFS} -D ${IMAGE_DEVICE_TABLE}
+ for devtable in ${@get_devtable_list(d)}; do
+ makedevs -r ${IMAGE_ROOTFS} -D $devtable
+ done
fi
real_do_rootfs
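(image_ipk.bbclass) get_devtable_list() resolves each name with bb.which() against BBPATH. A rough re-implementation of that lookup, assuming BBPATH's usual colon-separated layout:

    import os

    def which(search_path, candidate):
        # Approximation of bb.which(): return the first directory in the
        # colon-separated path that contains `candidate`.
        for directory in search_path.split(":"):
            full = os.path.join(directory, candidate)
            if os.path.exists(full):
                return full
        return ""

    # e.g. which("/oe/local:/oe/openembedded", "files/device_table-minimal.txt")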
diff --git a/classes/insane.bbclass b/classes/insane.bbclass
index 629d0e82b0..ead718db7f 100644
--- a/classes/insane.bbclass
+++ b/classes/insane.bbclass
@@ -1,24 +1,20 @@
-#
# BB Class inspired by ebuild.sh
#
-# As I will be copying code from from ebuild.sh this is
-# Copyright Gentoo Foundation 1999-2006
-# GPLv2
-#
# This class will test files after installation for certain
# security issues and other kind of issues.
#
# Checks we do:
# -Check the ownership and permissions
# -Check the RUNTIME path for the $TMPDIR
+# -Check if .la files wrongly point to workdir
+# -Check if .pc files wrongly point to workdir
+# -Check if packages contain .dbg or .so files that should be in -dev or -dbg
#
-# Checks that are planned:
-# -Check installed and stages .la files
#
# We need to have the scanelf utility as soon as
-# possible.
+# possible; it is provided by pax-utils-native
#
# We play a special package function
@@ -26,7 +22,95 @@ inherit package
PACKAGE_DEPENDS += "pax-utils-native"
PACKAGEFUNCS += " do_package_qa "
+def package_qa_check_rpath(file,name,d):
+ """
+ Check for dangerous RPATHs
+ """
+ import bb, os
+ scanelf = os.path.join(bb.data.getVar('STAGING_BINDIR',d,True),'scanelf')
+ bad_dir = bb.data.getVar('TMPDIR', d, True) + "/work"
+ if not os.path.exists(scanelf):
+ bb.note("Can not check RPATH scanelf not found")
+ if not bad_dir in bb.data.getVar('WORKDIR', d, True):
+ bb.error("This class assumed that WORKDIR is ${TMPDIR}/work... Not doing any check")
+
+ output = os.popen("%s -Byr %s" % (scanelf,file))
+ txt = output.readline().rsplit()
+ if bad_dir in txt:
+ bb.error("QA Issue package %s contains bad RPATH %s in file %s" % (name, txt, file))
+
+ pass
+
+def package_qa_check_devdbg(path, name,d):
+ """
+ Check for debug remains inside the binary and for
+ non -dev packages containing .so files
+ """
+
+ import bb
+ if not "-dev" in name:
+ if path[-3:] == ".so":
+ bb.error("QA Issue: non dev package contains .so")
+
+ if not "-dbg" in name:
+ if path[-4:] == ".dbg":
+ bb.error("QA Issue: non debug package contains .dbg file")
+
+def package_qa_check_perm(path,name,d):
+ """
+ Check the permission of files
+ """
+ pass
+
+def package_qa_check_arch(path,name,d):
+ """
+ Check if archs are compatible
+ """
+ pass
+
+def package_qa_check_pcla(path,name,d):
+ """
+ .pc and .la files should not point into the workdir
+ """
+
+def package_qa_check_staged(path,d):
+ """
+ Check staged la and pc files for sanity
+ -e.g. installed being false
+ """
+ pass
+
+# Walk over all files in a directory and call func
+def package_qa_walk(path, funcs, package,d):
+ import os
+ for root, dirs, files in os.walk(path):
+ for file in files:
+ path = os.path.join(root,file)
+ for func in funcs:
+ func(path, package,d)
+
+
+# The PACKAGE FUNC to scan each package
python do_package_qa () {
+ bb.note("DO PACKAGE QA")
+ workdir = bb.data.getVar('WORKDIR', d, True)
+ packages = bb.data.getVar('PACKAGES',d, True)
+
+ # nothing to scan if there are no packages
+ if not packages:
+ return
+
+ for package in packages.split():
+ bb.note("Package: %s" % package)
+ path = "%s/install/%s" % (workdir, package)
+ package_qa_walk(path, [package_qa_check_rpath, package_qa_check_devdbg, package_qa_check_perm, package_qa_check_arch], package, d)
}
+# The Staging Func, to check all staging
+addtask qa_staging after do_populate_staging before do_build
+python do_qa_staging() {
+ bb.note("Staged!")
+
+ package_qa_check_staged(bb.data.getVar('STAGING_DIR',d,True), d)
+}
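(insane.bbclass) The dev/dbg check is pure string matching on the package name and file suffix. A self-contained restatement, with a made-up path and package name:

    def check_devdbg(path, package):
        # package_qa_check_devdbg() in miniature: a bare .so belongs in a
        # -dev package, a .dbg file in a -dbg package.
        problems = []
        if "-dev" not in package and path.endswith(".so"):
            problems.append("non -dev package ships %s" % path)
        if "-dbg" not in package and path.endswith(".dbg"):
            problems.append("non -dbg package ships %s" % path)
        return problems

    print(check_devdbg("/image/usr/lib/libfoo.so", "libfoo"))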
diff --git a/classes/kernel-arch.bbclass b/classes/kernel-arch.bbclass
index 92a6c982fb..b331d25614 100644
--- a/classes/kernel-arch.bbclass
+++ b/classes/kernel-arch.bbclass
@@ -19,6 +19,7 @@ def map_kernel_arch(a, d):
elif re.match('armeb$', a): return 'arm'
elif re.match('powerpc$', a): return 'ppc'
elif re.match('mipsel$', a): return 'mips'
+ elif re.match('sh(3|4)$', a): return 'sh'
elif a in valid_archs: return a
else:
bb.error("cannot map '%s' to a linux kernel architecture" % a)
diff --git a/classes/kernel.bbclass b/classes/kernel.bbclass
index ab8c03014f..94e02925d7 100644
--- a/classes/kernel.bbclass
+++ b/classes/kernel.bbclass
@@ -10,7 +10,7 @@ PACKAGES_DYNAMIC += "kernel-image-*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-KERNEL_IMAGETYPE = "zImage"
+KERNEL_IMAGETYPE ?= "zImage"
KERNEL_PRIORITY = "${@bb.data.getVar('PV',d,1).split('-')[0].split('.')[-1]}"
@@ -45,11 +45,18 @@ export CMDLINE_CONSOLE = "console=${@bb.data.getVar("KERNEL_CONSOLE",d,1) or "tt
# parse kernel ABI version out of <linux/version.h>
def get_kernelversion(p):
+ import re, os
+
+ fn = p + '/include/linux/utsrelease.h'
+ if not os.path.isfile(fn):
+ fn = p + '/include/linux/version.h'
+
import re
try:
- f = open(p, 'r')
+ f = open(fn, 'r')
except IOError:
return None
+
l = f.readlines()
f.close()
r = re.compile("#define UTS_RELEASE \"(.*)\"")
@@ -67,7 +74,7 @@ def get_kernelmajorversion(p):
return m.group(1)
return None
-KERNEL_VERSION = "${@get_kernelversion('${S}/include/linux/version.h')}"
+KERNEL_VERSION = "${@get_kernelversion('${S}')}"
KERNEL_MAJOR_VERSION = "${@get_kernelmajorversion('${KERNEL_VERSION}')}"
KERNEL_LOCALVERSION ?= ""
@@ -109,6 +116,21 @@ kernel_do_stage() {
mkdir -p ${STAGING_KERNEL_DIR}/include/pcmcia
cp -fR include/pcmcia/* ${STAGING_KERNEL_DIR}/include/pcmcia/
+ if [ -d drivers/crypto ]; then
+ mkdir -p ${STAGING_KERNEL_DIR}/drivers/crypto
+ cp -fR drivers/crypto/* ${STAGING_KERNEL_DIR}/drivers/crypto/
+ fi
+
+ if [ -d include/media ]; then
+ mkdir -p ${STAGING_KERNEL_DIR}/include/media
+ cp -fR include/media/* ${STAGING_KERNEL_DIR}/include/media/
+ fi
+
+ if [ -d include/acpi ]; then
+ mkdir -p ${STAGING_KERNEL_DIR}/include/acpi
+ cp -fR include/acpi/* ${STAGING_KERNEL_DIR}/include/acpi/
+ fi
+
if [ -d include/sound ]; then
mkdir -p ${STAGING_KERNEL_DIR}/include/sound
cp -fR include/sound/* ${STAGING_KERNEL_DIR}/include/sound/
@@ -133,7 +155,7 @@ kernel_do_stage() {
# Check if arch/${ARCH}/Makefile exists and install it
if [ -e arch/${ARCH}/Makefile ]; then
install -d ${STAGING_KERNEL_DIR}/arch/${ARCH}
- install -m 0644 arch/${ARCH}/Makefile ${STAGING_KERNEL_DIR}/arch/${ARCH}
+ install -m 0644 arch/${ARCH}/Makefile* ${STAGING_KERNEL_DIR}/arch/${ARCH}
fi
cp -fR include/config* ${STAGING_KERNEL_DIR}/include/
install -m 0644 ${KERNEL_OUTPUT} ${STAGING_KERNEL_DIR}/${KERNEL_IMAGETYPE}
@@ -192,11 +214,22 @@ PKG_kernel-image = "kernel-image-${KERNEL_VERSION}"
ALLOW_EMPTY_kernel = "1"
ALLOW_EMPTY_kernel-image = "1"
+pkg_postinst_kernel-image () {
+if [ ! -e "$D/lib/modules/${KERNEL_RELEASE}" ]; then
+ mkdir -p $D/lib/modules/${KERNEL_RELEASE}
+fi
+if [ -n "$D" ]; then
+ ${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_RELEASE} ${KERNEL_VERSION}
+else
+ depmod -a
+fi
+}
+
pkg_postinst_modules () {
if [ -n "$D" ]; then
${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_RELEASE} ${KERNEL_VERSION}
else
- depmod -A
+ depmod -a
update-modules || true
fi
}
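(kernel.bbclass) get_kernelversion() now probes utsrelease.h first because kernel 2.6.18 moved the UTS_RELEASE define out of version.h. A stand-alone sketch of the lookup, assuming a kernel source tree layout:

    import os, re

    def kernel_version(srcdir):
        # Prefer utsrelease.h (2.6.18+), fall back to version.h, and pull
        # the quoted UTS_RELEASE string, mirroring get_kernelversion().
        fn = os.path.join(srcdir, "include/linux/utsrelease.h")
        if not os.path.isfile(fn):
            fn = os.path.join(srcdir, "include/linux/version.h")
        with open(fn) as f:
            m = re.search(r'#define UTS_RELEASE "(.*)"', f.read())
        return m.group(1) if m else None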
diff --git a/classes/module.bbclass b/classes/module.bbclass
index 8a13f1f858..6089f90462 100644
--- a/classes/module.bbclass
+++ b/classes/module.bbclass
@@ -38,7 +38,7 @@ pkg_postinst_append () {
if [ -n "$D" ]; then
exit 1
fi
- depmod -A
+ depmod -a
update-modules || true
}
diff --git a/classes/opie.bbclass b/classes/opie.bbclass
index 47f364a644..922cb9435a 100644
--- a/classes/opie.bbclass
+++ b/classes/opie.bbclass
@@ -18,7 +18,7 @@ inherit palmtop
# Note that when CVS changes to 1.2.2, the dash
# should be removed from OPIE_CVS_PV to convert
# to the standardised version format
-OPIE_CVS_PV = "1.2.1+cvs-${SRCDATE}"
+OPIE_CVS_PV = "1.2.2+cvs-${SRCDATE}"
DEPENDS_prepend = "${@["libopie2 ", ""][(bb.data.getVar('PN', d, 1) == 'libopie2')]}"
@@ -102,4 +102,4 @@ python opie_do_opie_install() {
}
EXPORT_FUNCTIONS do_opie_install
-addtask opie_install after do_compile before do_populate_staging
+addtask opie_install after do_compile before do_package
diff --git a/classes/patch.bbclass b/classes/patch.bbclass
new file mode 100644
index 0000000000..e3b89ba4f9
--- /dev/null
+++ b/classes/patch.bbclass
@@ -0,0 +1,490 @@
+# Copyright (C) 2006 OpenedHand LTD
+
+def patch_init(d):
+ import os, sys
+
+ def md5sum(fname):
+ import md5, sys
+
+ f = file(fname, 'rb')
+ m = md5.new()
+ while True:
+ d = f.read(8096)
+ if not d:
+ break
+ m.update(d)
+ f.close()
+ return m.hexdigest()
+
+ class CmdError(Exception):
+ def __init__(self, exitstatus, output):
+ self.status = exitstatus
+ self.output = output
+
+ def __str__(self):
+ return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
+
+ class NotFoundError(Exception):
+ def __init__(self, path):
+ self.path = path
+ def __str__(self):
+ return "Error: %s not found." % self.path
+
+ def runcmd(args, dir = None):
+ import commands
+
+ if dir:
+ olddir = os.path.abspath(os.curdir)
+ if not os.path.exists(dir):
+ raise NotFoundError(dir)
+ os.chdir(dir)
+ # print("cwd: %s -> %s" % (olddir, self.dir))
+
+ try:
+ args = [ commands.mkarg(str(arg)) for arg in args ]
+ cmd = " ".join(args)
+ # print("cmd: %s" % cmd)
+ (exitstatus, output) = commands.getstatusoutput(cmd)
+ if exitstatus != 0:
+ raise CmdError(exitstatus >> 8, output)
+ return output
+
+ finally:
+ if dir:
+ os.chdir(olddir)
+
+ class PatchError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return "Patch Error: %s" % self.msg
+
+ import bb, bb.data, bb.fetch
+
+ class PatchSet(object):
+ defaults = {
+ "strippath": 1
+ }
+
+ def __init__(self, dir, d):
+ self.dir = dir
+ self.d = d
+ self.patches = []
+ self._current = None
+
+ def current(self):
+ return self._current
+
+ def Clean(self):
+ """
+ Clean out the patch set. Generally includes unapplying all
+ patches and wiping out all associated metadata.
+ """
+ raise NotImplementedError()
+
+ def Import(self, patch, force):
+ if not patch.get("file"):
+ if not patch.get("remote"):
+ raise PatchError("Patch file must be specified in patch import.")
+ else:
+ patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
+
+ for param in PatchSet.defaults:
+ if not patch.get(param):
+ patch[param] = PatchSet.defaults[param]
+
+ if patch.get("remote"):
+ patch["file"] = bb.data.expand(bb.fetch.localpath(patch["remote"], self.d), self.d)
+
+ patch["filemd5"] = md5sum(patch["file"])
+
+ def Push(self, force):
+ raise NotImplementedError()
+
+ def Pop(self, force):
+ raise NotImplementedError()
+
+ def Refresh(self, remote = None, all = None):
+ raise NotImplementedError()
+
+
+ class PatchTree(PatchSet):
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+
+ def Import(self, patch, force = None):
+ """"""
+ PatchSet.Import(self, patch, force)
+
+ if self._current is not None:
+ i = self._current + 1
+ else:
+ i = 0
+ self.patches.insert(i, patch)
+
+ def _applypatch(self, patch, force = None, reverse = None):
+ shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ if reverse:
+ shellcmd.append('-R')
+
+ if not force:
+ shellcmd.append('--dry-run')
+
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ if force:
+ return
+
+ shellcmd.pop(len(shellcmd) - 1)
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ return output
+
+ def Push(self, force = None, all = None):
+ bb.note("self._current is %s" % self._current)
+ bb.note("patches is %s" % self.patches)
+ if all:
+ for i in self.patches:
+ if self._current is not None:
+ self._current = self._current + 1
+ else:
+ self._current = 0
+ bb.note("applying patch %s" % i)
+ self._applypatch(i, force)
+ else:
+ if self._current is not None:
+ self._current = self._current + 1
+ else:
+ self._current = 0
+ bb.note("applying patch %s" % self.patches[self._current])
+ self._applypatch(self.patches[self._current], force)
+
+
+ def Pop(self, force = None, all = None):
+ if all:
+ for i in self.patches:
+ self._applypatch(i, force, True)
+ else:
+ self._applypatch(self.patches[self._current], force, True)
+
+ def Clean(self):
+ """"""
+
+ class QuiltTree(PatchSet):
+ def _runcmd(self, args):
+ runcmd(["quilt"] + args, self.dir)
+
+ def _quiltpatchpath(self, file):
+ return os.path.join(self.dir, "patches", os.path.basename(file))
+
+
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+ self.initialized = False
+ p = os.path.join(self.dir, 'patches')
+ if not os.path.exists(p):
+ os.makedirs(p)
+
+ def Clean(self):
+ try:
+ self._runcmd(["pop", "-a", "-f"])
+ except Exception:
+ pass
+ self.initialized = True
+
+ def InitFromDir(self):
+ # read series -> self.patches
+ seriespath = os.path.join(self.dir, 'patches', 'series')
+ if not os.path.exists(self.dir):
+ raise Exception("Error: %s does not exist." % self.dir)
+ if os.path.exists(seriespath):
+ series = file(seriespath, 'r')
+ for line in series.readlines():
+ patch = {}
+ parts = line.strip().split()
+ patch["quiltfile"] = self._quiltpatchpath(parts[0])
+ patch["quiltfilemd5"] = md5sum(patch["quiltfile"])
+ if len(parts) > 1:
+ patch["strippath"] = parts[1][2:]
+ self.patches.append(patch)
+ series.close()
+
+ # determine which patches are applied -> self._current
+ try:
+ output = runcmd(["quilt", "applied"], self.dir)
+ except CmdError:
+ if sys.exc_value.output.strip() == "No patches applied":
+ return
+ else:
+ raise sys.exc_value
+ output = [val for val in output.split('\n') if not val.startswith('#')]
+ for patch in self.patches:
+ if os.path.basename(patch["quiltfile"]) == output[-1]:
+ self._current = self.patches.index(patch)
+ self.initialized = True
+
+ def Import(self, patch, force = None):
+ if not self.initialized:
+ self.InitFromDir()
+ PatchSet.Import(self, patch, force)
+
+ args = ["import", "-p", patch["strippath"]]
+ if force:
+ args.append("-f")
+ args.append(patch["file"])
+
+ self._runcmd(args)
+
+ patch["quiltfile"] = self._quiltpatchpath(patch["file"])
+ patch["quiltfilemd5"] = md5sum(patch["quiltfile"])
+
+ # TODO: determine if the file being imported:
+ # 1) is already imported, and is the same
+ # 2) is already imported, but differs
+
+ self.patches.insert(self._current or 0, patch)
+
+
+ def Push(self, force = None, all = None):
+ # quilt push [-f]
+
+ args = ["push"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+
+ self._runcmd(args)
+
+ if self._current is not None:
+ self._current = self._current + 1
+ else:
+ self._current = 0
+
+ def Pop(self, force = None, all = None):
+ # quilt pop [-f]
+ args = ["pop"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+
+ self._runcmd(args)
+
+ if self._current == 0:
+ self._current = None
+
+ if self._current is not None:
+ self._current = self._current - 1
+
+ def Refresh(self, **kwargs):
+ if kwargs.get("remote"):
+ patch = self.patches[kwargs["patch"]]
+ if not patch:
+ raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
+ (type, host, path, user, pswd, parm) = bb.decodeurl(patch["remote"])
+ if type == "file":
+ import shutil
+ if not patch.get("file") and patch.get("remote"):
+ patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
+
+ shutil.copyfile(patch["quiltfile"], patch["file"])
+ else:
+ raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
+ else:
+ # quilt refresh
+ args = ["refresh"]
+ if kwargs.get("quiltfile"):
+ args.append(os.path.basename(kwargs["quiltfile"]))
+ elif kwargs.get("patch"):
+ args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
+ self._runcmd(args)
+
+ class Resolver(object):
+ def __init__(self, patchset):
+ raise NotImplementedError()
+
+ def Resolve(self):
+ raise NotImplementedError()
+
+ def Revert(self):
+ raise NotImplementedError()
+
+ def Finalize(self):
+ raise NotImplementedError()
+
+ class NOOPResolver(Resolver):
+ def __init__(self, patchset):
+ self.patchset = patchset
+
+ def Resolve(self):
+ olddir = os.path.abspath(os.curdir)
+ os.chdir(self.patchset.dir)
+ try:
+ self.patchset.Push()
+ except Exception:
+ os.chdir(olddir)
+ raise sys.exc_value
+
+ # Patch resolver which relies on the user doing all the work involved in the
+ # resolution, with the exception of refreshing the remote copy of the patch
+ # files (the urls).
+ class UserResolver(Resolver):
+ def __init__(self, patchset):
+ self.patchset = patchset
+
+ # Force a push in the patchset, then drop to a shell for the user to
+ # resolve any rejected hunks
+ def Resolve(self):
+
+ olddir = os.path.abspath(os.curdir)
+ os.chdir(self.patchset.dir)
+ try:
+ self.patchset.Push(True)
+ except CmdError, v:
+ # Patch application failed
+ if sys.exc_value.output.strip() == "No patches applied":
+ return
+ print(sys.exc_value)
+ print('NOTE: dropping user into a shell, so that patch rejects can be fixed manually.')
+
+ os.system('/bin/sh')
+
+ # Construct a new PatchSet after the user's changes, compare the
+ # sets, checking patches for modifications, and doing a remote
+ # refresh on each.
+ oldpatchset = self.patchset
+ self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
+
+ for patch in self.patchset.patches:
+ oldpatch = None
+ for opatch in oldpatchset.patches:
+ if opatch["quiltfile"] == patch["quiltfile"]:
+ oldpatch = opatch
+
+ if oldpatch:
+ patch["remote"] = oldpatch["remote"]
+ if patch["quiltfile"] == oldpatch["quiltfile"]:
+ if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
+ bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
+ # user change? remote refresh
+ self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
+ else:
+ # User did not fix the problem. Abort.
+ raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
+ except Exception:
+ os.chdir(olddir)
+ raise
+ os.chdir(olddir)
+
+ g = globals()
+ g["PatchSet"] = PatchSet
+ g["PatchTree"] = PatchTree
+ g["QuiltTree"] = QuiltTree
+ g["Resolver"] = Resolver
+ g["UserResolver"] = UserResolver
+ g["NOOPResolver"] = NOOPResolver
+ g["NotFoundError"] = NotFoundError
+ g["CmdError"] = CmdError
+
+addtask patch after do_unpack
+do_patch[dirs] = "${WORKDIR}"
+python patch_do_patch() {
+ import re
+ import bb.fetch
+
+ patch_init(d)
+
+ src_uri = (bb.data.getVar('SRC_URI', d, 1) or '').split()
+ if not src_uri:
+ return
+
+ patchsetmap = {
+ "patch": PatchTree,
+ "quilt": QuiltTree,
+ }
+
+ cls = patchsetmap[bb.data.getVar('PATCHTOOL', d, 1) or 'quilt']
+
+ resolvermap = {
+ "noop": NOOPResolver,
+ "user": UserResolver,
+ }
+
+ rcls = resolvermap[bb.data.getVar('PATCHRESOLVE', d, 1) or 'user']
+
+ s = bb.data.getVar('S', d, 1)
+
+ path = os.getenv('PATH')
+ os.putenv('PATH', bb.data.getVar('PATH', d, 1))
+ patchset = cls(s, d)
+ patchset.Clean()
+
+ resolver = rcls(patchset)
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ for url in src_uri:
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ if not "patch" in parm:
+ continue
+
+ bb.fetch.init([url],d)
+ url = bb.encodeurl((type, host, path, user, pswd, []))
+ local = os.path.join('/', bb.fetch.localpath(url, d))
+
+ # did it need to be unpacked?
+ dots = os.path.basename(local).split(".")
+ if dots[-1] in ['gz', 'bz2', 'Z']:
+ unpacked = os.path.join(bb.data.getVar('WORKDIR', d),'.'.join(dots[0:-1]))
+ else:
+ unpacked = local
+ unpacked = bb.data.expand(unpacked, d)
+
+ if "pnum" in parm:
+ pnum = parm["pnum"]
+ else:
+ pnum = "1"
+
+ if "pname" in parm:
+ pname = parm["pname"]
+ else:
+ pname = os.path.basename(unpacked)
+
+ if "mindate" in parm:
+ mindate = parm["mindate"]
+ else:
+ mindate = 0
+
+ if "maxdate" in parm:
+ maxdate = parm["maxdate"]
+ else:
+ maxdate = "20711226"
+
+ pn = bb.data.getVar('PN', d, 1)
+ srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
+
+ if not srcdate:
+ srcdate = bb.data.getVar('SRCDATE', d, 1)
+
+ if srcdate == "now":
+ srcdate = bb.data.getVar('DATE', d, 1)
+
+ if (maxdate < srcdate) or (mindate > srcdate):
+ if (maxdate < srcdate):
+ bb.note("Patch '%s' is outdated" % pname)
+
+ if (mindate > srcdate):
+ bb.note("Patch '%s' is predated" % pname)
+
+ continue
+
+ bb.note("Applying patch '%s'" % pname)
+ try:
+ patchset.Import({"file":unpacked, "remote":url, "strippath": pnum}, True)
+ except NotFoundError:
+ import sys
+ raise bb.build.FuncFailed(str(sys.exc_value))
+ resolver.Resolve()
+}
+
+EXPORT_FUNCTIONS do_patch
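(patch.bbclass) The md5sum() helper reads the patch in 8k blocks so large patches are never slurped into memory; the digest is later compared to detect user edits under quilt. The same scheme written against hashlib (the md5 module used above was removed in Python 3):

    import hashlib

    def md5sum(fname, chunk=8096):
        m = hashlib.md5()
        with open(fname, "rb") as f:
            while True:
                block = f.read(chunk)
                if not block:
                    break
                m.update(block)
        return m.hexdigest()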
diff --git a/classes/pkgconfig.bbclass b/classes/pkgconfig.bbclass
index 62f15f312d..f2054b0b07 100644
--- a/classes/pkgconfig.bbclass
+++ b/classes/pkgconfig.bbclass
@@ -20,7 +20,7 @@ def get_pkgconfig_mangle(d):
return s
do_stage_append () {
- for pc in `find ${S} -name '*.pc' | grep -v -- '-uninstalled.pc$'`; do
+ for pc in `find ${S} -name '*.pc' -type f | grep -v -- '-uninstalled.pc$'`; do
pcname=`basename $pc`
install -d ${PKG_CONFIG_PATH}
cat $pc | sed ${@get_pkgconfig_mangle(d)} > ${PKG_CONFIG_PATH}/$pcname
diff --git a/classes/rm_work.bbclass b/classes/rm_work.bbclass
index 340446917e..7f590e1b15 100644
--- a/classes/rm_work.bbclass
+++ b/classes/rm_work.bbclass
@@ -19,4 +19,4 @@ do_rm_work () {
}
addtask rm_work before do_build
-addtask rm_work after do_package
+addtask rm_work after do_populate_staging
diff --git a/classes/rootfs_ipk.bbclass b/classes/rootfs_ipk.bbclass
index 2880411c31..25738e8cb1 100644
--- a/classes/rootfs_ipk.bbclass
+++ b/classes/rootfs_ipk.bbclass
@@ -31,13 +31,12 @@ real_do_rootfs () {
mkdir -p ${IMAGE_ROOTFS}/dev
if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
- rm -f ${DEPLOY_DIR_IPK}/Packages
touch ${DEPLOY_DIR_IPK}/Packages
ipkg-make-index -r ${DEPLOY_DIR_IPK}/Packages -p ${DEPLOY_DIR_IPK}/Packages -l ${DEPLOY_DIR_IPK}/Packages.filelist -m ${DEPLOY_DIR_IPK}
fi
mkdir -p ${T}
echo "src oe file:${DEPLOY_DIR_IPK}" > ${T}/ipkg.conf
- ipkgarchs="all any noarch ${TARGET_ARCH} ${IPKG_ARCHS} ${MACHINE}"
+ ipkgarchs="${IPKG_ARCHS}"
priority=1
for arch in $ipkgarchs; do
echo "arch $arch $priority" >> ${T}/ipkg.conf
@@ -97,7 +96,7 @@ log_check() {
then
echo "log_check: There were error messages in the logfile"
echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -i "$keyword_die"
+ echo "$lf_txt" | grep -v log_check | grep -i "$keyword_die" -C1
echo ""
do_exit=1
fi
@@ -139,7 +138,14 @@ remove_init_link () {
fi
}
+make_zimage_symlink_relative () {
+ if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
+ (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
+ fi
+}
+
# export the zap_root_password, create_etc_timestamp and remote_init_link
-EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link
+EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link make_zimage_symlink_relative
+
addtask rootfs before do_build after do_install
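(rootfs_ipk.bbclass) make_zimage_symlink_relative() relinks zImage once per sorted zImage-* name, so it ends up pointing at the last one, with a relative target. The same effect in Python, assuming a /boot-style directory:

    import os

    def relink_zimage(bootdir):
        kernels = sorted(n for n in os.listdir(bootdir) if n.startswith("zImage-"))
        link = os.path.join(bootdir, "zImage")
        if kernels and os.path.islink(link):
            os.remove(link)
            os.symlink(kernels[-1], link)   # relative target, like ln -sf $i zImage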
diff --git a/classes/sanity.bbclass b/classes/sanity.bbclass
index a06753b198..23a8f656b2 100644
--- a/classes/sanity.bbclass
+++ b/classes/sanity.bbclass
@@ -4,7 +4,11 @@
def raise_sanity_error(msg):
import bb
- bb.fatal("Openembedded's config sanity checker detected a potential misconfiguration.\nEither fix the cause of this error or at your own risk disable the checker (see sanity.conf).\n%s" % msg)
+ bb.fatal(""" Openembedded's config sanity checker detected a potential misconfiguration.
+ Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
+ Following is the list of potential problems / advisories:
+
+ %s""" % msg)
def check_conf_exists(fn, data):
import bb, os
@@ -60,13 +64,14 @@ def check_sanity(e):
if "diffstat-native" not in data.getVar('ASSUME_PROVIDED', e.data, True).split():
raise_sanity_error('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf')
- # Check the MACHINE is valid
+ # Check that the MACHINE is valid
if not check_conf_exists("conf/machine/${MACHINE}.conf", e.data):
raise_sanity_error('Please set a valid MACHINE in your local.conf')
- # Check the distro is valid
- if not check_conf_exists("conf/distro/${DISTRO}.conf", e.data):
- raise_sanity_error('Please set a valid DISTRO in your local.conf')
+ # Check that the DISTRO is valid
+	# need to take renamed DISTROs into account (they live in conf/distro/include)
+ if not ( check_conf_exists("conf/distro/${DISTRO}.conf", e.data) or check_conf_exists("conf/distro/include/${DISTRO}.inc", e.data) ):
+ raise_sanity_error("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf" % data.getVar("DISTRO", e.data, True ))
if not check_app_exists("${MAKE}", e.data):
raise_sanity_error('GNU make missing. Please install GNU make')
@@ -86,6 +91,15 @@ def check_sanity(e):
if not check_app_exists('texi2html', e.data):
raise_sanity_error('Please install the texi2html binary')
+ if not check_app_exists('cvs', e.data):
+ raise_sanity_error('Please install the cvs utility')
+
+ if not check_app_exists('svn', e.data):
+ raise_sanity_error('Please install the svn utility')
+
+ if not check_app_exists('bzip2', e.data):
+ raise_sanity_error('Please install the bzip2 utility')
+
oes_bb_conf = data.getVar( 'OES_BITBAKE_CONF', e.data, True )
if not oes_bb_conf:
raise_sanity_error('You do not include OpenEmbeddeds version of conf/bitbake.conf')
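(sanity.bbclass) The three new checks all go through check_app_exists(), which boils down to a PATH lookup. A rough modern equivalent (the real helper also expands BitBake variables in the name first):

    import shutil

    def check_app_exists(app):
        return shutil.which(app) is not None

    for tool in ("cvs", "svn", "bzip2"):
        if not check_app_exists(tool):
            print("Please install the %s utility" % tool)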
diff --git a/classes/sdl.bbclass b/classes/sdl.bbclass
index c0b21427a4..d478d97f18 100644
--- a/classes/sdl.bbclass
+++ b/classes/sdl.bbclass
@@ -26,7 +26,7 @@ sdl_do_sdl_install() {
Note=Auto Generated... this may be not what you want
Comment=${DESCRIPTION}
Exec=${APPNAME}
-Icon=${APPIMAGE}
+Icon=${PN}.png
Type=Application
Name=${PN}
EOF
diff --git a/classes/tinderclient.bbclass b/classes/tinderclient.bbclass
index f9243f7108..d36ef0b343 100644
--- a/classes/tinderclient.bbclass
+++ b/classes/tinderclient.bbclass
@@ -1,6 +1,27 @@
+def tinder_http_post(server, selector, content_type, body):
+ import httplib
+ # now post it
+ for i in range(0,5):
+ try:
+ h = httplib.HTTP(server)
+ h.putrequest('POST', selector)
+ h.putheader('content-type', content_type)
+ h.putheader('content-length', str(len(body)))
+ h.endheaders()
+ h.send(body)
+ errcode, errmsg, headers = h.getreply()
+ #print errcode, errmsg, headers
+ return (errcode,errmsg, headers, h.file)
+ except:
+ print "Error sending the report!"
+ # try again
+ pass
+
+ # return some garbage
+ return (-1, "unknown", "unknown", None)
+
def tinder_form_data(bound, dict, log):
output = []
- #br
# for each key in the dictionary
for name in dict:
output.append( "--" + bound )
@@ -29,7 +50,7 @@ def tinder_format_http_post(d,status,log):
for the tinderbox to be happy.
"""
- from bb import data
+ from bb import data, build
import os,random
# the variables we will need to send on this form post
@@ -72,7 +93,6 @@ def tinder_build_start(d):
on the server.
"""
from bb import data
- import httplib
# get the body and type
content_type, body = tinder_format_http_post(d,None,None)
@@ -84,15 +104,9 @@ def tinder_build_start(d):
#print "selector %s and url %s" % (selector, url)
# now post it
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
+ errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
#print errcode, errmsg, headers
- report = h.file.read()
+ report = h_file.read()
# now let us find the machine id that was assigned to us
search = "<machine id='"
@@ -108,31 +122,27 @@ def tinder_build_start(d):
f.write(report)
-def tinder_send_http(d, status, log):
+def tinder_send_http(d, status, _log):
"""
Send this log as build status
"""
from bb import data
- import httplib
# get the body and type
- content_type, body = tinder_format_http_post(d,status,log)
server = data.getVar('TINDER_HOST', d, True )
url = data.getVar('TINDER_URL', d, True )
selector = url + "/xml/build_status.pl"
- # now post it
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- #print errcode, errmsg, headers
- #print h.file.read()
+	# now post it - in chunks of 18,000 characters
+ new_log = _log
+ while len(new_log) > 0:
+ content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
+ errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
+ #print errcode, errmsg, headers
+ #print h.file.read()
+ new_log = new_log[18000:]
def tinder_print_info(d):
@@ -230,8 +240,8 @@ def tinder_tinder_start(d, event):
output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
output.append( "<--- TINDERBOX STARTING BUILD NOW" )
- output.append( "" )
-
+ output.append( "" )
+
return "\n".join(output) % vars()
def tinder_do_tinder_report(event):
@@ -245,16 +255,23 @@ def tinder_do_tinder_report(event):
information immediately. The caching/queuing needs to be
implemented. Also sending more or less information is not
implemented yet.
+
+    We have two temporary files stored in the TMP directory. One file
+    contains the machine id assigned to the tinderclient; this id gets
+    assigned when we connect to the box and start the build process. The
+    second file is used to work around an EventHandler limitation: if
+    BitBake is run with the continue option we want the build to fail even
+    if we get the BuildCompleted event. In this case we have to look up the
+    status and send it instead of 100/success.
"""
from bb.event import getName
- from bb import data, mkdirhier
+ from bb import data, mkdirhier, build
import os, glob
# variables
name = getName(event)
log = ""
status = 1
- #print asd
# Check what we need to do Build* shows we start or are done
if name == "BuildStarted":
tinder_build_start(event.data)
@@ -262,9 +279,18 @@ def tinder_do_tinder_report(event):
try:
# truncate the tinder log file
- f = file(data.getVar('TINDER_LOG', event.data, True), 'rw+')
- f.truncate(0)
+ f = file(data.getVar('TINDER_LOG', event.data, True), 'w')
+ f.write("")
f.close()
+ except:
+ pass
+
+ try:
+ # write a status to the file. This is needed for the -k option
+ # of BitBake
+ g = file(data.getVar('TMPDIR', event.data, True)+"/tinder-status", 'w')
+ g.write("")
+ g.close()
except IOError:
pass
@@ -285,15 +311,27 @@ def tinder_do_tinder_report(event):
elif name == "TaskFailed":
log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % data.getVar('P', event.data, True)
+ log += "---> TINDERBOX Package %s started\n" % data.getVar('PF', event.data, True)
elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % data.getVar('P', event.data, True)
+ log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % data.getVar('PF', event.data, True)
elif name == "PkgFailed":
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % data.getVar('P', event.data, True)
+ if not data.getVar('TINDER_AUTOBUILD', event.data, True) == "0":
+ build.exec_task('do_clean', event.data)
+ log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % data.getVar('PF', event.data, True)
status = 200
+ # remember the failure for the -k case
+ h = file(data.getVar('TMPDIR', event.data, True)+"/tinder-status", 'w')
+ h.write("200")
elif name == "BuildCompleted":
log += "Build Completed\n"
status = 100
+ # Check if we have a old status...
+ try:
+ h = file(data.getVar('TMPDIR',event.data,True)+'/tinder-status', 'r')
+ status = int(h.read())
+ except:
+ pass
+
elif name == "MultipleProviders":
log += "---> TINDERBOX Multiple Providers\n"
log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
@@ -304,6 +342,9 @@ def tinder_do_tinder_report(event):
log += "Error: No Provider for: %s\n" % event.getItem()
log += "Error:Was Runtime: %d\n" % event.isRuntime()
status = 200
+ # remember the failure for the -k case
+ h = file(data.getVar('TMPDIR', event.data, True)+"/tinder-status", 'w')
+ h.write("200")
# now post the log
if len(log) == 0:
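(tinderclient.bbclass) tinder_send_http() now walks the log in fixed-size slices so a long build log becomes several small POSTs. The loop shape, isolated, with a print standing in for the HTTP post:

    def send_in_chunks(log, post, chunk=18000):
        # Mirrors the while-loop in tinder_send_http(): slice, send, advance.
        while log:
            post(log[:chunk])
            log = log[chunk:]

    send_in_chunks("x" * 40000, lambda body: print(len(body)))   # 18000, 18000, 4000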