author     Klaus Kurzmann <mok@fluxnetz.de>   2009-12-15 14:46:02 +0100
committer  Klaus Kurzmann <mok@fluxnetz.de>   2009-12-15 14:46:02 +0100
commit     54fa869952dc6b7c2f05018f65434877257d44de (patch)
tree       061bbe8844bd42f826a53ce1b4414dc6a568d86c
parent     b9f0d895f3d94ff42a8cd5c02924443915ce0181 (diff)
parent     47d6cb217ff161738488f4f82af9dd8f8c616ce0 (diff)
download   openembedded-54fa869952dc6b7c2f05018f65434877257d44de.tar.gz
Merge branch 'org.openembedded.dev' into martin_jansa/srcpv
Conflicts:
    recipes/mesa/mesa-xlib_7.2.bb
    recipes/uclibc/uclibc_nptl.bb

re-add SRCPV to illume-keyboards-shr

Signed-off-by: Klaus Kurzmann <mok@fluxnetz.de>
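SRCPV, mentioned above, is BitBake's source-revision-derived version component: a recipe folds it into PV so the package version increases whenever the pinned SRCREV moves. A minimal sketch of the pattern being re-added for illume-keyboards-shr is shown below; the values are illustrative assumptions (only the SRCREV also appears in sane-srcrevs.inc in this diff), not the actual recipe content, which is not part of this commit.

    # Hypothetical recipe fragment -- illustrates the SRCPV pattern used throughout
    # this tree (compare "PV = \"0.0.1+svnr${SRCPV}\"" in e-tasks_svn.bb below).
    SRCREV = "c23bde500152c303971b4c2ec5c463d760b7cc14"
    # PV carries SRCPV so a new SRCREV yields a higher package version.
    PV = "1.1+gitr${SRCPV}"
    PR = "r0"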
-rw-r--r--  conf/checksums.ini | 74
-rw-r--r--  conf/compatibility-providers.conf | 2
-rw-r--r--  conf/distro/include/angstrom-2008-preferred-versions.inc | 2
-rw-r--r--  conf/distro/include/sane-srcrevs-fso.inc | 2
-rw-r--r--  conf/distro/include/sane-srcrevs.inc | 11
-rw-r--r--  conf/machine/da850-omapl138-evm.conf | 2
-rw-r--r--  recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb (renamed from recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb) | 2
-rw-r--r--  recipes/e17/e-tasks_svn.bb | 10
-rw-r--r--  recipes/fbreader/fbreader-0.12.1/Makefile.patch | 22
-rw-r--r--  recipes/fbreader/fbreader_0.12.1.bb | 35
-rw-r--r--  recipes/gabriel/gabriel_svn.bb | 2
-rw-r--r--  recipes/gcc/gcc-cross-initial_svn.bb | 2
-rw-r--r--  recipes/gcc/gcc-cross-intermediate_svn.bb | 2
-rw-r--r--  recipes/gcc/gcc-cross-sdk_svn.bb | 2
-rw-r--r--  recipes/gcc/gcc-cross_svn.bb | 2
-rw-r--r--  recipes/gcc/gcc-svn.inc | 2
-rw-r--r--  recipes/gnome/zenity_2.28.0.bb | 11
-rw-r--r--  recipes/intone-video/intone-video_svn.bb | 19
-rw-r--r--  recipes/intone/intone_svn.bb | 10
-rw-r--r--  recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch | 53
-rw-r--r--  recipes/iproute2/iproute2.inc | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.18.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.20.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.22.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.29.bb | 3
-rw-r--r--  recipes/kexec-tools/files/dietlibc.patch (renamed from recipes/kexec/files/dietlibc.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/fix-arm-arch-detection.patch (renamed from recipes/kexec/files/fix-arm-arch-detection.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-arm-atags.patch (renamed from recipes/kexec/files/kexec-arm-atags.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-klibc.patch (renamed from recipes/kexec/files/kexec-klibc.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-static.patch (renamed from recipes/kexec/files/kexec-static.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-tools-2-arm-add-uImage.patch (renamed from recipes/kexec/files/kexec-tools-2-arm-add-uImage.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-tools-2-headers.patch (renamed from recipes/kexec/files/kexec-tools-2-headers.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-tools-2-klibc.patch (renamed from recipes/kexec/files/kexec-tools-2-klibc.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/kexec-tools-arm.patch (renamed from recipes/kexec/files/kexec-tools-arm.patch) | 0
-rw-r--r--  recipes/kexec-tools/files/no-getline-no-fscanf.patch (renamed from recipes/kexec/files/no-getline-no-fscanf.patch) | 0
-rw-r--r--  recipes/kexec-tools/kexec-tools-dietlibc_2.0.1.bb (renamed from recipes/kexec/kexec-tools-dietlibc_2.0.1.bb) | 4
-rw-r--r--  recipes/kexec-tools/kexec-tools-klibc-static_1.101.bb (renamed from recipes/kexec/kexec-tools-klibc-static_1.101.bb) | 7
-rw-r--r--  recipes/kexec-tools/kexec-tools-klibc-static_2.0.1.bb (renamed from recipes/kexec/kexec-tools-klibc-static_2.0.1.bb) | 7
-rw-r--r--  recipes/kexec-tools/kexec-tools.inc (renamed from recipes/kexec/kexec-tools.inc) | 0
-rw-r--r--  recipes/kexec-tools/kexec-tools2.inc (renamed from recipes/kexec/kexec-tools2.inc) | 0
-rw-r--r--  recipes/kexec-tools/kexec-tools_1.101.bb (renamed from recipes/kexec/kexec-tools_1.101.bb) | 0
-rw-r--r--  recipes/kexec-tools/kexec-tools_2.0.1.bb (renamed from recipes/kexec/kexec-tools_2.0.1.bb) | 0
-rw-r--r--  recipes/kexecboot/kexecboot.inc | 10
-rw-r--r--  recipes/kexecboot/kexecboot_git.bb | 4
-rw-r--r--  recipes/klibc/klibc-1.5.15/isystem.patch | 13
-rw-r--r--  recipes/klibc/klibc_1.5.15.bb | 2
-rw-r--r--  recipes/klibc/klibc_1.5.15.inc | 3
-rw-r--r--  recipes/linux-libc-headers/linux-libc-headers-2.6.32/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch | 429
-rw-r--r--  recipes/linux-libc-headers/linux-libc-headers_2.6.32.bb | 50
-rw-r--r--  recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch | 29
-rw-r--r--  recipes/linux/linux-2.6.22/mx31moboard/defconfig (renamed from recipes/linux/linux-2.6.22.6/mx31moboard/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch | 19
-rw-r--r--  recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch | 8567
-rw-r--r--  recipes/linux/linux-2.6.23/time.h.patch (renamed from recipes/linux/linux-2.6.24/time.h.patch) | 0
-rw-r--r--  recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig (renamed from recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig (renamed from recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch | 49
-rw-r--r--  recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch | 54
-rw-r--r--  recipes/linux/linux_2.6.18.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.20.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.21+2.6.22-rc1.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.21.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.22+2.6.23-rc3.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.22+2.6.23-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.22.6.bb | 31
-rw-r--r--  recipes/linux/linux_2.6.22.bb | 6
-rw-r--r--  recipes/linux/linux_2.6.23+2.6.24-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.23+2.6.24-rc6.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.23.bb | 15
-rw-r--r--  recipes/linux/linux_2.6.24.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.25.20.bb | 33
-rw-r--r--  recipes/linux/linux_2.6.25.bb | 17
-rw-r--r--  recipes/linux/linux_2.6.26.bb | 6
-rw-r--r--  recipes/linux/linux_2.6.27.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.28.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.29+2.6.30-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.29.bb | 5
-rw-r--r--  recipes/linux/linux_2.6.30.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.31.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.32.bb | 3
-rw-r--r--  recipes/mesa/mesa-common-old.inc | 5
-rw-r--r--  recipes/mesa/mesa-dri_6.5.2.bb | 2
-rw-r--r--  recipes/mesa/mesa-dri_7.0.3.bb | 2
-rw-r--r--  recipes/mesa/mesa-xlib_7.2.bb | 3
-rw-r--r--  recipes/mesa/mesa-xlib_7.4.bb | 20
-rw-r--r--  recipes/mesa/mesa_6.5.2.bb | 4
-rw-r--r--  recipes/mesa/mesa_7.0.2.bb | 2
-rw-r--r--  recipes/mpc/mpc-native_0.8.1.bb | 3
-rw-r--r--  recipes/mpc/mpc.inc | 6
-rw-r--r--  recipes/mpc/mpc_0.8.1.bb | 10
-rw-r--r--  recipes/networkmanager/networkmanager_0.7.1.997.bb | 6
-rw-r--r--  recipes/openmoko-3rdparty/guitartune_svn.bb | 28
-rw-r--r--  recipes/powervr-drivers/libgles-omap3.inc | 1
-rw-r--r--  recipes/qt4/qt-4.6.0.inc | 2
-rw-r--r--  recipes/qt4/qt4-embedded-gles_4.6.0.bb | 2
-rw-r--r--  recipes/qt4/qt4-embedded_4.6.0.bb | 1
-rw-r--r--  recipes/qt4/qt4-tools-sdk_4.6.0.bb | 74
-rw-r--r--  recipes/qt4/qt4-x11-free-gles_4.6.0.bb | 2
-rw-r--r--  recipes/qt4/qt4-x11-free_4.6.0.bb | 1
-rw-r--r--  recipes/qt4/qt4.inc | 57
-rw-r--r--  recipes/qt4/wolfenqt-e_git.bb | 5
-rw-r--r--  recipes/qt4/wolfenqt.inc | 19
-rw-r--r--  recipes/qt4/wolfenqt_git.bb | 5
-rw-r--r--  recipes/sg3-utils/sg3-utils_1.24.bb | 4
-rw-r--r--  recipes/shr/initscripts-shr/palmpre/usb-gadget.sh | 10
-rw-r--r--  recipes/shr/initscripts-shr_0.0.1.bb | 8
-rw-r--r--  recipes/shr/shr-launcher_svn.bb | 10
-rw-r--r--  recipes/tasks/task-shr-feed.bb | 1
-rw-r--r--  recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch | 12
-rw-r--r--  recipes/uclibc/uclibc.inc | 3
-rw-r--r--  recipes/uclibc/uclibc_nptl.bb | 1
-rw-r--r--  recipes/xorg-driver/xf86-input-tslib/dontfloodevents006.patch | 22
-rw-r--r--  recipes/xorg-driver/xf86-input-tslib_0.0.6.bb | 4
113 files changed, 9634 insertions, 380 deletions
diff --git a/conf/checksums.ini b/conf/checksums.ini
index 0fe5acb594..8fc614beb3 100644
--- a/conf/checksums.ini
+++ b/conf/checksums.ini
@@ -58,6 +58,14 @@ sha256=8930ebfdc8a606d8cb26f073d4700460c3289fb79e943e12948329e17336ca47
md5=779472ae02c2a99937879a8d1d4b9b25
sha256=cfb98e7635c985733dba0fb9c3cadee22ab70fb3b0db7eac8eacaebc65c92a59
+[http://maxim.org.za/AT91RM9200/2.6/2.6.22-rc1-at91.patch.gz]
+md5=2453815aba40e9487d24822d769fbab1
+sha256=95cec79fb6db261760421c1bb9df8a0f4955e0ee90e08cb9930a4c7a2482c1a6
+
+[http://maxim.org.za/AT91RM9200/2.6/2.6.23-rc3-at91.patch.gz]
+md5=822f2f85b658fb1f39b8a20fab781cfc
+sha256=e8ead43fa562cc76ac34d0d4841fd1e4f4964a830403801433e34961d1ce0e84
+
[http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz]
md5=4469d6336f9659f1725fedd4a52261ad
sha256=7a960180e7873b1bdb522e76b0423b5c2c1b8efe1d30d7ca80c41eb97d822b2d
@@ -1542,9 +1550,9 @@ sha256=68b1d0acd1a6e17d91412635cd4f65ba58d293e62a01475a43f3712c49a46e7d
md5=03e5e7ab8ac3acc59661c6e9c09089b7
sha256=fcda8bca508490bea642c83fcf718565bf4ed4c50f2d7b34761da61fe2e6bc9d
-[http://downloads.vdm-design.de/aceofpenguins-launcher-0.3.tar.gz]
-md5=2fe7dcdbbdbf3b2821f627e02c406caa
-sha256=13013cd19cb165825f4ef5cc790b0e103705f68ece470fd12012db97e3e60839
+[http://downloads.vdm-design.de/aceofpenguins-launcher-0.4.tar.gz]
+md5=40f19a26cbfa35de8346efe582b5d984
+sha256=ea09581b3a31e3409968876461da3e384dcb0000abbb0060ae14e4fccb4994e1
[http://downloads.sourceforge.net/acpid/acpid-1.0.10.tar.gz]
md5=61156ef32015c56dc0f2e3317f4ae09e
@@ -6734,6 +6742,10 @@ sha256=e4db7a6305ffe2333fae08c940ded8f7e5b02999e0917b0ea4ef3764c80f58c8
md5=145f4d2ba24c54288bad2d66ddd2baf2
sha256=d7bcc7cafb1c78f00b380dc2facdb82c5a2fb1475e1ddfc8e5d44a2b855cec1a
+[http://www.fbreader.org/fbreader-sources-0.12.1.tgz]
+md5=7236d094a91f26d19a3cfd3db8fcf946
+sha256=02a6a143abf3b5ab875392acef8d67f5cc991ea364e3bd250628e6a31b1f9923
+
[http://only.mawhrin.net/fbreader/obsolete/fbreader-sources-0.7.3d.tgz]
md5=705a89bb03860fb312a7afa25db0310e
sha256=ec9f93968147526a9b2dd17e31a6d2795356b2eeed309f796f655b37b98abd83
@@ -10962,6 +10974,10 @@ sha256=01ba0c34c3bf3bc4ea7728550e3bcfca779a48907ea4d6d9e5a83e8678df5096
md5=dbb085088337e2dd8c0216c36523ea06
sha256=68a114ece326b258b26259d31b9bb59c10049ff0162bcaa0f4f7a7dea9d244dc
+[http://gupnp.org/sources/gssdp/gssdp-0.7.1.tar.gz]
+md5=725c32e8f92a072cc34f0e091937df2a
+sha256=8eaab799f699836770ec2fcc08abfef2f824a82ae959c6af7b39ffb6968b9fd7
+
[http://gstreamer.freedesktop.org/src/gst-ffmpeg/gst-ffmpeg-0.10.1.tar.bz2]
md5=e21aef9a84d67dea9a68c1379781f763
sha256=a2c877c38d057875c7dfbf1803030c5cc9707020b77f91673500d6ea8d858607
@@ -11578,6 +11594,10 @@ sha256=f78e4b0a361b67805892c1a0e72f3cef92fbc96112157895660a478979ddeef4
md5=bfb12195c76bb6632bd917f2c2bc12d6
sha256=241e416cbe2c02f413fde82a8587bfe2fe9915fbed3a6fb20c86520b8d7543ef
+[http://gupnp.org/sources/gupnp/gupnp-0.13.1.tar.gz]
+md5=9b5fcf8146ba9a2bd84382f61717aa0e
+sha256=e97faaebf0da42617a43de4c7c1148a51148f2a2cdaa2a10855e377b968a07fd
+
[http://gupnp.org/sources/gupnp-av/gupnp-av-0.2.tar.gz]
md5=ab485bf263d0a3d2f771817241c970b9
sha256=782e4e45abcba1b3fe34276580653f4dbfbe8a26eee69a290675dfa7faa309f8
@@ -11586,6 +11606,10 @@ sha256=782e4e45abcba1b3fe34276580653f4dbfbe8a26eee69a290675dfa7faa309f8
md5=f6e813591ff89e8e61a46f416046450f
sha256=34b6e104b480e501e430daa68fca63906a939a6cb02bc43814ed06d2856a72ac
+[http://gupnp.org/sources/gupnp-av/gupnp-av-0.5.2.tar.gz]
+md5=15ccfbb17553bf1cb00bf8e1d801005e
+sha256=dfd438f40e31047d6f06db30db05d5f876c6294a8509f170482d712f552e9892
+
[http://gupnp.org/sources/gupnp-tools/gupnp-tools-0.6.1.tar.gz]
md5=27d3a55eae2243661f49a7cf40e1e195
sha256=c4004de997674bb54d5844391dd824c6aee78184b075e7ea5d8f816e0d031e25
@@ -19134,6 +19158,10 @@ sha256=76e151048a736cf490630ace4909f2d4b65f1afe2447c7fd794d270eb956e045
md5=48897aeb3a7ee5c64f30e56789f105a8
sha256=7b549ca4af77fc5b0472df1ecd9e76d2f8415258ddcfb63dfa64a55a04e1e590
+[http://www.multiprecision.org/mpc/download/mpc-0.8.1.tar.gz]
+md5=5b34aa804d514cc295414a963aedb6bf
+sha256=e664603757251fd8a352848276497a4c79b7f8b21fd8aedd5cc0598a38fee3e4
+
[http://downloads.sourceforge.net/musicpd/mpd-0.11.2.tar.gz]
md5=b6e913dc4b63b161bb1b96030763c3a7
sha256=e784f78b3cce93367c105619c690465b78a88d3e4c616cc829f4cb5e66f2bbe0
@@ -21030,6 +21058,10 @@ sha256=34beecc0dd156267e8004fb79efea9bf97e1157ed597bdde1841c16def2e9195
md5=955088e5139ef5914d44fe15b4a50b90
sha256=924c27b95f0c3792bf3d48ff854ef145e8916452b917067d653f59102f03c0d4
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.18.8.bz2]
+md5=090f582d2a0e1951d500b2e55f7df7b4
+sha256=cde777361d2a4818ea9c215e195a87da4847dafa94a10ac8c9f4bd8dc49fde3f
+
[http://www.muru.com/linux/omap/patches/patch-2.6.19-omap1.bz2]
md5=3590e42e1a6ea4676df5b187b830b402
sha256=b7cba2e38e81abb4d1d33c3f7f7c028310b392d5ee1ed5e3b2137e024598d903
@@ -21058,6 +21090,10 @@ sha256=808ca62a66d7cfe40123301c2f51fc0dcd817ee3bb0df96d1e9e97cc3bad6a9c
md5=b9c8734471a454806c77f040fcf9869b
sha256=5ee24e1c5636bcffed155b1c01d7d09fedb135fa2458c190a0da03a82c8c2f60
+[http://kernel.org/pub/linux/kernel/v2.6/testing/v2.6.22/patch-2.6.22-rc1.bz2]
+md5=9bc06492dce31c87f1cdfa2ce5b0cf4c
+sha256=dd33f3e9059bed043194ee5200239f26d3ad607ab5c872e7ce92595c1eb5d0e4
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.22.19.bz2]
md5=066cc3bdd2783dcd01f6ff466e449ec0
sha256=829c48b49c71d89468f2a5a05587714811197545eeba31e9643cabacf344d33a
@@ -21086,6 +21122,10 @@ sha256=c2085fc8fc6df586ef8c19a4562b84162f0b77956d691aa4fbee5e90c9800cb7
md5=736ea68a03158c24e55aa95e0ab15ceb
sha256=4d2c13dee5ea7bd8b5cdbf63afa9383b45f6bad1f75b163c49e086a5030a04de
+[http://kernel.org/pub/linux/kernel/v2.6/testing/v2.6.23/patch-2.6.23-rc5.bz2]
+md5=8253467313749aee6065093cd3c5fd9c
+sha256=c8c2068183aca79c46182f3d3fe6d7579cd60809681d42c52d71cf1873cd1a0e
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.23.1.bz2]
md5=896c5d4e7fd68d37b8c16e5d2842563b
sha256=55e811b4d4b0cbfde500bdd5455b7180d3def3dbdc52314520b7327d32b23f42
@@ -21182,6 +21222,10 @@ sha256=7fc735b85225850dae3c3acfe4bf0fe59f4c884d7c37f89395867dc73bf8f8af
md5=e75d87c11065955871b2e005d0242c99
sha256=0e5e633e81f1dcc0dcd2372494fbd2d6a72b28bb177a3285c4766a9659b454bd
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.27.41.bz2]
+md5=a431f1f68badcfa5a6cecc6b4d52d319
+sha256=ed3ec802ccb3585bf0a57724471fa24b98cce5caef8f35d6c26fed7c9213168d
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.27.8.bz2]
md5=ec23e3dce22b23ca681199fe515f10fb
sha256=31c35db09289c6e0436a258745d7180e0cd8f567949f27b3dab5a57a3664ed2f
@@ -21226,6 +21270,10 @@ sha256=25a9aff47cc568e4bcaa4377cacbcae11ea454aeeea9519aa3a1b6dbffea713c
md5=2f399a5e286a9fe7cb40bfd3d42a7a3d
sha256=79a9913a74e58af6431bb952aac2cf0a1f4422287f420844f24ca6bc5ed0fdc4
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.30.10.bz2]
+md5=6485fe0cf0f0220493647505bfd2f7b0
+sha256=a1ffb806d7d0083aa8d0525cbccede4172f4a44c8df1ddfeece629b6d8304201
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.30.4.bz2]
md5=d0fc44b54ba5953140b3f2aa9a1f2580
sha256=b7716971e73c8fa96ecd9cdb598c8bd3a2a946e289cfef5dcfaa11a0022737ce
@@ -21254,6 +21302,18 @@ sha256=bc670682ed9b81d5d3859130c600601bd72053fd738b51b2daf8ddf3f4614a66
md5=6cac5e59d5562b591cdda485941204d5
sha256=41e7d98a205d58a62901daf4e46ecf5fb0b177e5a233a3c0ad3250a3a0abe8aa
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.31.7.bz2]
+md5=801b9c379a12339c0210dc5d1a8e9537
+sha256=f9edbb3c15c4094d840d5954131bb91c4513d333b30bced7b5c906d36c843d5e
+
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.31.8.bz2]
+md5=0fda994c76a981a67464f43c766f2180
+sha256=c120bf7018749fca765773975b3d4444fa244781ceb844e4b9bab9b478cad213
+
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.32.1.bz2]
+md5=abc24a9beb8431a75301dd3884b37a3c
+sha256=26f6fad3259ad6d2356cfa04462ace5fd3b5cecec410bbcefe7d5f6dbeb00baa
+
[http://www.muru.com/linux/omap/patches/patch-2.6.9-omap1.bz2]
md5=d6249654087f0bcafaa860ac573316a4
sha256=91806347cb386002a8bfd20ee66e536e4a7dfb01f207dd751341f2971090d9ac
@@ -23866,6 +23926,10 @@ sha256=04ecd2577ee0c210df610b4a5d870f2bda57c80962fb5b51ae9c4a94098c726c
md5=7a0c73ccf0e31808ad37b61e730cd10e
sha256=33d2363d40b1db7568864dc063ce4af70e50a9f9292a16445f0b167618d8fbba
+[http://ftp.acc.umu.se/pub/GNOME/sources/rygel/0.4/rygel-0.4.6.tar.bz2]
+md5=f28c48e2caf79b8e9056636259056de7
+sha256=67c58590430d04799644d164e4a5977356d95528864ea787356e35dd314e660f
+
[http://www.informatik.hu-berlin.de/~tkunze/zaurus/patches/sa1100-dma.patch]
md5=4336cca098b577608813a7c1454f2edd
sha256=c532c14ffb9358d1e0dbb67fce113c6aec0dd9c77ad41875222d69979baac9e7
@@ -31530,6 +31594,10 @@ sha256=cc3b203acbd745a7c3b6ea3d8e74d7fb07cf8f8aa92bcaa3b2edb023dee02a36
md5=6660ce6dac0a6ca495a0e954cb6b40a2
sha256=fc619215ca90cab48efae7bdf524d1de690b75832f07f3cbcb6d5c3910d402a1
+[http://ftp.gnome.org/pub/GNOME/sources/zenity/2.28/zenity-2.28.0.tar.bz2]
+md5=262c476aebbf67a7043cd80c6a03add3
+sha256=3747a23ce46b0ee68e6261d669ae461f0f460d9c6ad19e04acbd518ddfcc8c63
+
[http://www.progsoc.org/~wildfire/zeroconf/download/zeroconf-0.6.1.tar.gz]
md5=31ac40fdaf24b3e666ed83c1320dd7a5
sha256=3e93416ad44202c2952a1342dad12f2ad0e61dd3f1c59110d9ad8cb1d4c639e5
diff --git a/conf/compatibility-providers.conf b/conf/compatibility-providers.conf
index d2ee2b6a23..10a81f2943 100644
--- a/conf/compatibility-providers.conf
+++ b/conf/compatibility-providers.conf
@@ -61,7 +61,7 @@ PREFERRED_PROVIDER_virtual/gail ?= "gtk+"
PREFERRED_PROVIDER_virtual/javac-native ?= "ecj-bootstrap-native"
PREFERRED_PROVIDER_virtual/java-initial ?= "jamvm-initial"
PREFERRED_PROVIDER_virtual/java-native ?= "jamvm-native"
-PREFERRED_PROVIDER_virtual/libgl ?= "mesa-dri"
+PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
PREFERRED_PROVIDER_virtual/libsdl ?= "libsdl-x11"
#PREFERRED_PROVIDER_virtual/libusb0 ?= "libusb"
PREFERRED_PROVIDER_virtual/libusb0 ?= "libusb-compat"
diff --git a/conf/distro/include/angstrom-2008-preferred-versions.inc b/conf/distro/include/angstrom-2008-preferred-versions.inc
index d59c99ec00..790b08e3bc 100644
--- a/conf/distro/include/angstrom-2008-preferred-versions.inc
+++ b/conf/distro/include/angstrom-2008-preferred-versions.inc
@@ -23,6 +23,8 @@ PREFERRED_VERSION_libtool-cross = "2.2.6a"
PREFERRED_VERSION_libtool-sdk = "2.2.6a"
PREFERRED_VERSION_libx11 = "1.2"
PREFERRED_VERSION_mesa = "7.4"
+PREFERRED_VERSION_mesa-dri = "7.4"
+PREFERRED_VERSION_mesa-xlib = "7.4"
PREFERRED_VERSION_pango = "1.24.4"
#PREFERRED_VERSION_pixman = "0.13.2"
PREFERRED_VERSION_pkgconfig = "0.23"
diff --git a/conf/distro/include/sane-srcrevs-fso.inc b/conf/distro/include/sane-srcrevs-fso.inc
index 80f8bfeedc..d975ec261a 100644
--- a/conf/distro/include/sane-srcrevs-fso.inc
+++ b/conf/distro/include/sane-srcrevs-fso.inc
@@ -30,7 +30,7 @@ SRCREV_pn-fso-specs ?= "14de522adbea80416df811085b3112cd1d5d5336"
SRCREV_pn-gsmd2 ?= "c16883a079aeff8780e5d461ec4e8348537ab4d8"
SRCREV_pn-libeflvala ?= "d07db4fbd24c9d5dfc9b1fd5024fd651b02f123e"
SRCREV_pn-libfso-glib ?= "9a627aa1c33f5a46ae4316fc274126ca5f37e979"
-SRCREV_pn-libframeworkd-glib ?= "52c988638e517e0f06ea7841b80692c729a9f487"
+SRCREV_pn-libframeworkd-glib ?= "e5cc248f241ea549810051fb95f0250bd1224cf9"
SRCREV_pn-libgsm0710 ?= "cd564c8782f018e0d65fb8716c99a6040b5bd166"
SRCREV_pn-libgsm0710mux ?= "e81ed512ec86e31d0d0119826afa9d1302651693"
SRCREV_pn-libpersistence ?= "26180fd3c0fe4eb6abb7440f10e51d997719b97a"
diff --git a/conf/distro/include/sane-srcrevs.inc b/conf/distro/include/sane-srcrevs.inc
index 3c3b671574..6f3a35fc90 100644
--- a/conf/distro/include/sane-srcrevs.inc
+++ b/conf/distro/include/sane-srcrevs.inc
@@ -46,7 +46,7 @@ SRCREV_pn-dfu-util-native ?= "4160"
SRCREV_pn-disko ?= "f52597b8d5d584811cbe8f9e0bf25ea372526953"
SRCREV_pn-diversity-daemon ?= "571"
SRCREV_pn-diversity-radar ?= "453"
-SRCREV_pn-e-tasks ?= "18"
+SRCREV_pn-e-tasks ?= "22"
SRCREV_pn-e-wm-config-illume-shr ?= "bbcec18f0ebd47e4f6eea88b9b774edf7400e752"
SRCREV_pn-e-wm-illume-dict-pl ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-e-wm-menu-shr ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
@@ -86,7 +86,7 @@ SRCREV_pn-fsoraw ?= "20"
SRCREV_pn-fstests ?= "204"
SRCREV_pn-gabriel ?= "38"
SRCREV_pn-gabriel-native ?= "38"
-GCCREV ?= "145550"
+GCCREV ?= "155234"
SRCREV_pn-gcc ?= ${GCCREV}
SRCREV_pn-gcc-cross ?= ${GCCREV}
SRCREV_pn-gcc-cross-initial ?= ${GCCREV}
@@ -104,14 +104,15 @@ SRCREV_pn-gpe-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-gridpad ?= "194"
SRCREV_pn-gtk-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-gtkhtml2 ?= "1158"
+SRCREV_pn-guitartune ?= "11"
SRCREV_pn-gypsy ?= "134"
SRCREV_pn-hildon-1 ?= "14429"
SRCREV_pn-icon-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-illume-keyboards-shr ?= "c23bde500152c303971b4c2ec5c463d760b7cc14"
SRCREV_pn-illume-theme-asu ?= "4881"
SRCREV_pn-illume-theme-freesmartphone ?= "b1b0f6adc59e6f72a3929771058e3750bf181bc5"
-SRCREV_pn-intone ?= "66"
-SRCREV_pn-intone-video ?= "9"
+SRCREV_pn-intone ?= "75"
+SRCREV_pn-intone-video ?= "12"
SRCREV_pn-intuition ?= "194"
SRCREV_pn-kismet ?= "2285"
SRCREV_pn-kismet-newcore ?= "2285"
@@ -292,7 +293,7 @@ SRCREV_pn-shr-config ?= "37dd7ac950e2bfd438801faf34c29fccfdbbaccf"
SRCREV_pn-shr-contacts ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-dialer ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-installer ?= "f17fa104639113fb0d3212b6bba366c092854cde"
-SRCREV_pn-shr-launcher ?= "92"
+SRCREV_pn-shr-launcher ?= "99"
SRCREV_pn-shr-messages ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-settings ?= "ef06fe86c49958673889671c46682c0b2f1d74d7"
SRCREV_pn-shr-specs ?= "a881cd133439737708d4d4d150500246ceff7c7d"
diff --git a/conf/machine/da850-omapl138-evm.conf b/conf/machine/da850-omapl138-evm.conf
index b61c00ca27..aaf7583e5d 100644
--- a/conf/machine/da850-omapl138-evm.conf
+++ b/conf/machine/da850-omapl138-evm.conf
@@ -3,7 +3,7 @@
#@DESCRIPTION: Machine configuration for the TI DA850/OMAPL138 EVM board
require conf/machine/include/davinci.inc
-require conf/machine/include/omapl138.inc
+require conf/machine/include/omapl138.conf
UBOOT_MACHINE = "da850_omapl138_evm_config"
UBOOT_ENTRYPOINT = "0xc0008000"
diff --git a/recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb b/recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb
index 2e96d81475..265b8c6651 100644
--- a/recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb
+++ b/recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb
@@ -8,7 +8,7 @@ SECTION = "x11/application"
PACKAGE_ARCH = "all"
-PR = "r1"
+PR = "r0"
inherit setuptools
diff --git a/recipes/e17/e-tasks_svn.bb b/recipes/e17/e-tasks_svn.bb
index 9556e146d6..5c07a1bd3c 100644
--- a/recipes/e17/e-tasks_svn.bb
+++ b/recipes/e17/e-tasks_svn.bb
@@ -10,14 +10,8 @@ inherit autotools
PV = "0.0.1+svnr${SRCPV}"
PR = "r1"
-SRC_URI = "svn://e-tasks.googlecode.com/svn/trunk;module=.;proto=http"
-S = "${WORKDIR}"
-
-do_configure_prepend() {
- # all links to /usr/share/automake-1.10/
- rm -f ${S}/depcomp ${S}/config.guess ${S}/config.sub ${S}/INSTALL ${S}/install-sh ${S}/missing
- touch ${S}/INSTALL
-}
+SRC_URI = "svn://e-tasks.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
do_install_append() {
install -d "${D}/${datadir}/pixmaps"
diff --git a/recipes/fbreader/fbreader-0.12.1/Makefile.patch b/recipes/fbreader/fbreader-0.12.1/Makefile.patch
new file mode 100644
index 0000000000..0e498c7125
--- /dev/null
+++ b/recipes/fbreader/fbreader-0.12.1/Makefile.patch
@@ -0,0 +1,22 @@
+diff -uri fbreader-0.12.1.orig/fbreader/Makefile fbreader-0.12.1/fbreader/Makefile
+--- fbreader-0.12.1.orig/fbreader/Makefile 2009-12-13 09:03:11.000000000 +0100
++++ fbreader-0.12.1/fbreader/Makefile 2009-12-14 17:18:47.548638783 +0100
+@@ -37,7 +37,6 @@
+ @install $(TARGET) $(DESTDIR)$(BINDIR)/FBReader
+ @install -d $(FBSHAREDIR)
+ @install -d $(FBSHAREDIR)/help
+- @./scripts/install_help.sh $(VARIANT) $(FBSHAREDIR)/help
+ @install -d $(FBSHAREDIR)/network
+ @install -m 0644 $(wildcard data/network/*.xml) $(FBSHAREDIR)/network
+ @install -d $(FBSHAREDIR)/network/certificates
+@@ -58,8 +57,8 @@
+ @install -d $(FBSHAREDIR)/resources
+ @install -m 0644 $(wildcard data/resources/*.xml) $(FBSHAREDIR)/resources
+ @install -d $(DESTDIR)$(APPIMAGEDIR_REAL)
+- @install -m 0644 $(wildcard data/icons/toolbar/$(VARIANT)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+- @install -m 0644 $(wildcard data/icons/filetree/$(VARIANT)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
++ @install -m 0644 $(wildcard data/icons/toolbar/$(TARGET_ARCH)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
++ @install -m 0644 $(wildcard data/icons/filetree/$(TARGET_ARCH)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+ @install -m 0644 $(wildcard data/icons/booktree/new/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+ @make -C $(TARGET_ARCH) RESOLUTION=$(RESOLUTION) install
+
diff --git a/recipes/fbreader/fbreader_0.12.1.bb b/recipes/fbreader/fbreader_0.12.1.bb
new file mode 100644
index 0000000000..c1c3025a1a
--- /dev/null
+++ b/recipes/fbreader/fbreader_0.12.1.bb
@@ -0,0 +1,35 @@
+DESCRIPTION = "FBreader is an ebook reader"
+HOMEPAGE = "http://www.fbreader.org"
+SECTION = "x11/utils"
+PRIORITY = "optional"
+LICENSE = "GPLv2"
+DEPENDS = "gtk+ enca expat bzip2 libgpewidget virtual/libiconv liblinebreak libfribidi"
+
+SRC_URI = "http://www.fbreader.org/fbreader-sources-${PV}.tgz \
+file://Makefile.patch;patch=1"
+
+# Set the defaults
+READER_RESOLUTION ?= "1024x600"
+READER_ARCH ?= "desktop"
+READER_UI ?= "gtk"
+READER_STATUS ?= "release"
+
+FILES_${PN} += "${datadir}/FBReader ${datadir}/zlibrary ${libdir}/zlibrary"
+
+CFLAGS_append = " RESOLUTION=${READER_RESOLUTION} INSTALLDIR=${prefix}"
+EXTRA_OEMAKE = "CC='${CXX}' LD='${CXX}' OE_CFLAGS='${CXXFLAGS}' INCPATH='${STAGING_INCDIR}' LIBPATH='${STAGING_LIBDIR}'"
+
+inherit pkgconfig
+
+do_configure() {
+ cd ${WORKDIR}/${PN}-${PV}
+ mv makefiles/target.mk makefiles/target.mk.orig
+
+ echo "TARGET_ARCH = ${READER_ARCH}" > makefiles/target.mk
+ echo "UI_TYPE = ${READER_UI}" >> makefiles/target.mk
+ echo "TARGET_STATUS = ${READER_STATUS}" >> makefiles/target.mk
+}
+
+do_install() {
+ oe_runmake install DESTDIR=${D} RESOLUTION=${READER_RESOLUTION}
+}
diff --git a/recipes/gabriel/gabriel_svn.bb b/recipes/gabriel/gabriel_svn.bb
index 94fdfe99e7..f525885fb0 100644
--- a/recipes/gabriel/gabriel_svn.bb
+++ b/recipes/gabriel/gabriel_svn.bb
@@ -2,7 +2,7 @@ DESCRIPTION = "Gabriel is a small utility to enable D-Bus clients to connect to
daemon running on a remote machine, through SSH. In simple words, gabriel is a proxy for \
a dbus daemon running on a remote machine."
LICENSE = "GPL"
-DEPENDS = "libssh glib-2.0 dbus glib-dbus"
+DEPENDS = "libssh glib-2.0 dbus dbus-glib"
SECTION = "console/network"
PV = "0.0.0+svnr${SRCPV}"
diff --git a/recipes/gcc/gcc-cross-initial_svn.bb b/recipes/gcc/gcc-cross-initial_svn.bb
index 067ab61be3..0650a81ecf 100644
--- a/recipes/gcc/gcc-cross-initial_svn.bb
+++ b/recipes/gcc/gcc-cross-initial_svn.bb
@@ -1,7 +1,7 @@
require gcc-cross_${PV}.bb
require gcc-cross-initial.inc
-DEPENDS += "gmp-native mpfr-native"
+DEPENDS += "gmp-native mpfr-native mpc-native"
EXTRA_OECONF += " --disable-libmudflap \
--disable-libgomp \
diff --git a/recipes/gcc/gcc-cross-intermediate_svn.bb b/recipes/gcc/gcc-cross-intermediate_svn.bb
index a9ae5bdf20..93cd35730b 100644
--- a/recipes/gcc/gcc-cross-intermediate_svn.bb
+++ b/recipes/gcc/gcc-cross-intermediate_svn.bb
@@ -1,7 +1,7 @@
require gcc-cross_${PV}.bb
require gcc-cross-intermediate.inc
-DEPENDS += "gmp-native mpfr-native"
+DEPENDS += "mpc-native gmp-native mpfr-native"
EXTRA_OECONF += " --disable-libmudflap \
--disable-libgomp \
diff --git a/recipes/gcc/gcc-cross-sdk_svn.bb b/recipes/gcc/gcc-cross-sdk_svn.bb
index 22937da3c4..752266be5f 100644
--- a/recipes/gcc/gcc-cross-sdk_svn.bb
+++ b/recipes/gcc/gcc-cross-sdk_svn.bb
@@ -7,7 +7,7 @@ require gcc-cross-sdk.inc
require gcc-configure-sdk.inc
require gcc-package-sdk.inc
-DEPENDS += "gmp-native mpfr-native"
+DEPENDS += "mpc-native gmp-native mpfr-native"
EXTRA_OECONF += "--disable-libunwind-exceptions --disable-libssp \
--disable-libgomp --disable-libmudflap \
diff --git a/recipes/gcc/gcc-cross_svn.bb b/recipes/gcc/gcc-cross_svn.bb
index 2186a81944..af46e3ac88 100644
--- a/recipes/gcc/gcc-cross_svn.bb
+++ b/recipes/gcc/gcc-cross_svn.bb
@@ -4,6 +4,8 @@ require gcc-cross4.inc
require gcc-configure-cross.inc
require gcc-package-cross.inc
+DEPENDS += "mpc-native"
+
SRC_URI_append_fail-fast = " file://zecke-no-host-includes.patch;patch=1 "
EXTRA_OECONF += " --disable-libunwind-exceptions --with-mpfr=${STAGING_DIR_NATIVE}${prefix_native}"
diff --git a/recipes/gcc/gcc-svn.inc b/recipes/gcc/gcc-svn.inc
index ab39a73724..c39546dc96 100644
--- a/recipes/gcc/gcc-svn.inc
+++ b/recipes/gcc/gcc-svn.inc
@@ -4,7 +4,7 @@ ARM_INSTRUCTION_SET = "arm"
require gcc-common.inc
-DEPENDS = "mpfr gmp"
+DEPENDS = "mpfr gmp mpc"
GCC_BASE ?= "4.5"
PV = "${GCC_BASE}+svnr${SRCPV}"
diff --git a/recipes/gnome/zenity_2.28.0.bb b/recipes/gnome/zenity_2.28.0.bb
new file mode 100644
index 0000000000..0d8d208f16
--- /dev/null
+++ b/recipes/gnome/zenity_2.28.0.bb
@@ -0,0 +1,11 @@
+DESCRIPTION = "A tool to display dialogs from the command line and shell scripts."
+LICENSE = "GPL"
+
+DEPENDS = "gtk+ libglade glib-2.0 libnotify"
+
+inherit gnome
+
+do_configure_prepend() {
+ sed -i -e '/-I$(includedir)/d' src/Makefile.am
+}
+
diff --git a/recipes/intone-video/intone-video_svn.bb b/recipes/intone-video/intone-video_svn.bb
index 30ac70acd2..0f7979f122 100644
--- a/recipes/intone-video/intone-video_svn.bb
+++ b/recipes/intone-video/intone-video_svn.bb
@@ -9,26 +9,15 @@ RDEPENDS = "mplayer lame libxv libsdl-x11"
PV = "0.13+svnr${SRCPV}"
PR = "r1"
-SRC_URI = "svn://intone-video.googlecode.com/svn/trunk;module=.;proto=http"
-S = "${WORKDIR}"
+SRC_URI = "svn://intone-video.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
inherit autotools
-do_configure_prepend() {
- rm -f "${S}/INSTALL"
- touch "${S}/INSTALL"
- sed -i 's/intone/intone-video/g' ${S}/configure.ac
- sed -i 's/\/doc\/intone$/\/share\/doc\/intone-video/g' ${S}/Makefile.am
- sed -i '/^EXTRA_DIST = $(glade_DATA)/d' ${S}/src/Makefile.am
- sed -i '/^gladedir = $(datadir)\/intone\/glade/d' ${S}/src/Makefile.am
- sed -i '/^glade_DATA = intone.glade/d' ${S}/src/Makefile.am
-}
-
do_install_append() {
- mv ${D}/${bindir}/intone ${D}/${bindir}/intone-video
- mkdir -p "${D}/${datadir}/pixmaps"
+ install -d "${D}/${datadir}/pixmaps"
install -m 0644 "${S}/resources/intone-video.png" "${D}/${datadir}/pixmaps"
- mkdir -p "${D}/${datadir}/applications"
+ install -d "${D}/${datadir}/applications"
install -m 0644 "${S}/resources/intone-video.desktop" "${D}/${datadir}/applications"
}
diff --git a/recipes/intone/intone_svn.bb b/recipes/intone/intone_svn.bb
index e1af70d9aa..29773c2638 100644
--- a/recipes/intone/intone_svn.bb
+++ b/recipes/intone/intone_svn.bb
@@ -9,18 +9,12 @@ RDEPENDS = "mplayer lame libxv libsdl-x11"
PV = "0.66+svnr${SRCPV}"
PR = "r2"
-SRC_URI = "svn://intone.googlecode.com/svn/trunk;module=.;proto=http \
+SRC_URI = "svn://intone.googlecode.com/svn;module=trunk;proto=http \
file://vorbis-include-id3tag.patch;pnum=1;patch=1;maxrev=18"
-S = "${WORKDIR}"
+S = "${WORKDIR}/trunk"
inherit autotools
-do_configure_prepend() {
- rm -f "${S}/INSTALL"
- touch "${S}/INSTALL"
- sed -i 's/{prefix}\/doc\/intone$/{prefix}\/share\/doc\/intone/g' ${S}/Makefile.am
-}
-
do_install_append() {
mkdir -p "${D}/${datadir}/pixmaps"
install -m 0644 "${S}/resources/intone.png" "${D}/${datadir}/pixmaps"
diff --git a/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch b/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch
new file mode 100644
index 0000000000..be5d31d5e1
--- /dev/null
+++ b/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch
@@ -0,0 +1,53 @@
+Patch to tc/Makefile is from
+http://bugs.gentoo.org/236861
+
+configure patch is based on suggestion from
+PR 5117
+
+http://bugs.openembedded.org/show_bug.cgi?id=5147
+
+-Khem
+Index: iproute2-2.6.29/configure
+===================================================================
+--- iproute2-2.6.29.orig/configure 2009-12-14 11:07:42.000000000 -0800
++++ iproute2-2.6.29/configure 2009-12-14 11:08:38.000000000 -0800
+@@ -16,7 +16,7 @@ int main(int argc, char **argv) {
+ return 0;
+ }
+ EOF
+-gcc -I$INCLUDE -o /tmp/atmtest /tmp/atmtest.c -latm >/dev/null 2>&1
++$CC -I$INCLUDE -o /tmp/atmtest /tmp/atmtest.c -latm >/dev/null 2>&1
+ if [ $? -eq 0 ]
+ then
+ echo "TC_CONFIG_ATM:=y" >>Config
+@@ -49,7 +49,7 @@ int main(int argc, char **argv) {
+ }
+
+ EOF
+-gcc -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
++$CC -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
+
+ if [ $? -eq 0 ]
+ then
+@@ -81,7 +81,7 @@ int main(int argc, char **argv) {
+ }
+
+ EOF
+-gcc -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
++$CC -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
+
+ if [ $? -eq 0 ]
+ then
+Index: iproute2-2.6.29/tc/Makefile
+===================================================================
+--- iproute2-2.6.29.orig/tc/Makefile 2009-12-14 11:10:27.000000000 -0800
++++ iproute2-2.6.29/tc/Makefile 2009-12-14 11:11:39.000000000 -0800
+@@ -100,7 +100,7 @@ clean:
+ rm -f emp_ematch.yacc.output
+
+ q_atm.so: q_atm.c
+- $(CC) $(CFLAGS) -shared -fpic -o q_atm.so q_atm.c -latm
++ $(CC) $(CFLAGS) $(LDFLAGS) -shared -fpic -o q_atm.so q_atm.c -latm
+
+ %.yacc.c: %.y
+ $(YACC) $(YACCFLAGS) -o $@ $<
diff --git a/recipes/iproute2/iproute2.inc b/recipes/iproute2/iproute2.inc
index 8c8519f230..9792bad4d6 100644
--- a/recipes/iproute2/iproute2.inc
+++ b/recipes/iproute2/iproute2.inc
@@ -4,6 +4,8 @@ SECTION = "base"
LICENSE = "GPL"
DEPENDS = "flex-native bison-native"
+INC_PR = "r3"
+
# Set the DATE in the .bb file
SRC_URI = "http://developer.osdl.org/dev/iproute2/download/${P}-${DATE}.tar.gz"
diff --git a/recipes/iproute2/iproute2_2.6.18.bb b/recipes/iproute2/iproute2_2.6.18.bb
index d442a091db..32e871118e 100644
--- a/recipes/iproute2/iproute2_2.6.18.bb
+++ b/recipes/iproute2/iproute2_2.6.18.bb
@@ -1,4 +1,4 @@
-PR = "r3"
+PR = "${INC_PR}.0"
require iproute2.inc
diff --git a/recipes/iproute2/iproute2_2.6.20.bb b/recipes/iproute2/iproute2_2.6.20.bb
index e72dfccdde..dd1a504142 100644
--- a/recipes/iproute2/iproute2_2.6.20.bb
+++ b/recipes/iproute2/iproute2_2.6.20.bb
@@ -1,6 +1,6 @@
require iproute2.inc
-PR = "r2"
+PR = "${INC_PR}.0"
DATE = "070313"
SRC_URI_append = " file://new-flex-fix.patch;patch=1 \
diff --git a/recipes/iproute2/iproute2_2.6.22.bb b/recipes/iproute2/iproute2_2.6.22.bb
index 1ee0ce7ab2..55e3a7575d 100644
--- a/recipes/iproute2/iproute2_2.6.22.bb
+++ b/recipes/iproute2/iproute2_2.6.22.bb
@@ -1,6 +1,6 @@
require iproute2.inc
-PR = "r1"
+PR = "${INC_PR}.0"
DATE = "070710"
SRC_URI_append = " file://new-flex-fix.patch;patch=1 \
diff --git a/recipes/iproute2/iproute2_2.6.29.bb b/recipes/iproute2/iproute2_2.6.29.bb
index d02573a359..d38dde880f 100644
--- a/recipes/iproute2/iproute2_2.6.29.bb
+++ b/recipes/iproute2/iproute2_2.6.29.bb
@@ -1,10 +1,11 @@
require iproute2.inc
-PR = "r1"
+PR = "${INC_PR}.0"
SRC_URI = "http://developer.osdl.org/dev/iproute2/download/${P}.tar.bz2 \
file://new-flex-fix.patch;patch=1 \
file://compilation-fix.patch;patch=1 \
+ file://use-cross-compiler.patch;patch=1 \
"
S = "${WORKDIR}/iproute2-${PV}"
diff --git a/recipes/kexec/files/dietlibc.patch b/recipes/kexec-tools/files/dietlibc.patch
index 0048d7a6a2..0048d7a6a2 100644
--- a/recipes/kexec/files/dietlibc.patch
+++ b/recipes/kexec-tools/files/dietlibc.patch
diff --git a/recipes/kexec/files/fix-arm-arch-detection.patch b/recipes/kexec-tools/files/fix-arm-arch-detection.patch
index 6116c5bb84..6116c5bb84 100644
--- a/recipes/kexec/files/fix-arm-arch-detection.patch
+++ b/recipes/kexec-tools/files/fix-arm-arch-detection.patch
diff --git a/recipes/kexec/files/kexec-arm-atags.patch b/recipes/kexec-tools/files/kexec-arm-atags.patch
index eb3cb75990..eb3cb75990 100644
--- a/recipes/kexec/files/kexec-arm-atags.patch
+++ b/recipes/kexec-tools/files/kexec-arm-atags.patch
diff --git a/recipes/kexec/files/kexec-klibc.patch b/recipes/kexec-tools/files/kexec-klibc.patch
index 090b6231f1..090b6231f1 100644
--- a/recipes/kexec/files/kexec-klibc.patch
+++ b/recipes/kexec-tools/files/kexec-klibc.patch
diff --git a/recipes/kexec/files/kexec-static.patch b/recipes/kexec-tools/files/kexec-static.patch
index 549300576e..549300576e 100644
--- a/recipes/kexec/files/kexec-static.patch
+++ b/recipes/kexec-tools/files/kexec-static.patch
diff --git a/recipes/kexec/files/kexec-tools-2-arm-add-uImage.patch b/recipes/kexec-tools/files/kexec-tools-2-arm-add-uImage.patch
index 69a1588ba2..69a1588ba2 100644
--- a/recipes/kexec/files/kexec-tools-2-arm-add-uImage.patch
+++ b/recipes/kexec-tools/files/kexec-tools-2-arm-add-uImage.patch
diff --git a/recipes/kexec/files/kexec-tools-2-headers.patch b/recipes/kexec-tools/files/kexec-tools-2-headers.patch
index 091e12d9a8..091e12d9a8 100644
--- a/recipes/kexec/files/kexec-tools-2-headers.patch
+++ b/recipes/kexec-tools/files/kexec-tools-2-headers.patch
diff --git a/recipes/kexec/files/kexec-tools-2-klibc.patch b/recipes/kexec-tools/files/kexec-tools-2-klibc.patch
index 2008829b60..2008829b60 100644
--- a/recipes/kexec/files/kexec-tools-2-klibc.patch
+++ b/recipes/kexec-tools/files/kexec-tools-2-klibc.patch
diff --git a/recipes/kexec/files/kexec-tools-arm.patch b/recipes/kexec-tools/files/kexec-tools-arm.patch
index 6e43b76096..6e43b76096 100644
--- a/recipes/kexec/files/kexec-tools-arm.patch
+++ b/recipes/kexec-tools/files/kexec-tools-arm.patch
diff --git a/recipes/kexec/files/no-getline-no-fscanf.patch b/recipes/kexec-tools/files/no-getline-no-fscanf.patch
index f2a32b4d0d..f2a32b4d0d 100644
--- a/recipes/kexec/files/no-getline-no-fscanf.patch
+++ b/recipes/kexec-tools/files/no-getline-no-fscanf.patch
diff --git a/recipes/kexec/kexec-tools-dietlibc_2.0.1.bb b/recipes/kexec-tools/kexec-tools-dietlibc_2.0.1.bb
index 0155e72793..4f49755f9d 100644
--- a/recipes/kexec/kexec-tools-dietlibc_2.0.1.bb
+++ b/recipes/kexec-tools/kexec-tools-dietlibc_2.0.1.bb
@@ -1,12 +1,10 @@
-# the binaries are statical linked against klibc
+# the binaries are statical linked against dietlibc
require kexec-tools2.inc
SRC_URI += "file://dietlibc.patch;patch=1"
inherit dietlibc
-#DEFAULT_PREFERENCE = "-1"
-
PR = "r0"
S = "${WORKDIR}/kexec-tools-${PV}"
diff --git a/recipes/kexec/kexec-tools-klibc-static_1.101.bb b/recipes/kexec-tools/kexec-tools-klibc-static_1.101.bb
index 5d131d839e..3ee699b1b3 100644
--- a/recipes/kexec/kexec-tools-klibc-static_1.101.bb
+++ b/recipes/kexec-tools/kexec-tools-klibc-static_1.101.bb
@@ -1,7 +1,7 @@
# the binaries are statical linked against klibc
require kexec-tools.inc
-PR = "r6"
+PR = "r7"
DEPENDS = "klibc"
SRC_URI += "file://kexec-static.patch;patch=1 \
@@ -13,11 +13,6 @@ EXTRA_OECONF = " --without-zlib"
export CC=${TARGET_PREFIX}klcc
-# standart oe cflags don't work with klcc
-export CFLAGS=""
-export CPPFLAGS=""
-export LDFLAGS=""
-
PACKAGES =+ "kexec-klibc-static kdump-klibc-static"
FILES_kexec-klibc-static = "${sbindir}/kexec"
diff --git a/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb b/recipes/kexec-tools/kexec-tools-klibc-static_2.0.1.bb
index 54e4601d20..1c2327f82e 100644
--- a/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb
+++ b/recipes/kexec-tools/kexec-tools-klibc-static_2.0.1.bb
@@ -3,7 +3,7 @@ require kexec-tools2.inc
DEFAULT_PREFERENCE = "1"
-PR = "r2"
+PR = "r3"
DEPENDS = "klibc"
SRC_URI += "file://kexec-tools-2-headers.patch;patch=1 \
@@ -16,11 +16,6 @@ EXTRA_OECONF = " --without-zlib"
export CC=${TARGET_PREFIX}klcc
-# standart oe cflags don't work with klcc
-export CFLAGS=""
-export CPPFLAGS=""
-export LDFLAGS=""
-
PACKAGES =+ "kexec-klibc-static kdump-klibc-static"
FILES_kexec-klibc-static = "${sbindir}/kexec"
diff --git a/recipes/kexec/kexec-tools.inc b/recipes/kexec-tools/kexec-tools.inc
index f806e71480..f806e71480 100644
--- a/recipes/kexec/kexec-tools.inc
+++ b/recipes/kexec-tools/kexec-tools.inc
diff --git a/recipes/kexec/kexec-tools2.inc b/recipes/kexec-tools/kexec-tools2.inc
index a21afe4003..a21afe4003 100644
--- a/recipes/kexec/kexec-tools2.inc
+++ b/recipes/kexec-tools/kexec-tools2.inc
diff --git a/recipes/kexec/kexec-tools_1.101.bb b/recipes/kexec-tools/kexec-tools_1.101.bb
index f74c7bcd83..f74c7bcd83 100644
--- a/recipes/kexec/kexec-tools_1.101.bb
+++ b/recipes/kexec-tools/kexec-tools_1.101.bb
diff --git a/recipes/kexec/kexec-tools_2.0.1.bb b/recipes/kexec-tools/kexec-tools_2.0.1.bb
index 606b3fd634..606b3fd634 100644
--- a/recipes/kexec/kexec-tools_2.0.1.bb
+++ b/recipes/kexec-tools/kexec-tools_2.0.1.bb
diff --git a/recipes/kexecboot/kexecboot.inc b/recipes/kexecboot/kexecboot.inc
index ebd193beea..679c4a378a 100644
--- a/recipes/kexecboot/kexecboot.inc
+++ b/recipes/kexecboot/kexecboot.inc
@@ -14,10 +14,12 @@ SRC_URI += "file://logo-img.h \
export CC=${TARGET_PREFIX}klcc
-# standard oe cflags don't work with klcc
-export CFLAGS = ""
-export CPPFLAGS = ""
-export LDFLAGS = ""
+# for output on serial console: --enable-debug
+EXTRA_OECONF_akita = "--with-zaurus"
+EXTRA_OECONF_c7x0 = "--with-zaurus"
+EXTRA_OECONF_spitz = "--with-zaurus"
+EXTRA_OECONF_poodle = "--with-zaurus"
+EXTRA_OECONF_tosa = "--with-zaurus"
do_configure_prepend () {
install -m 0644 ${WORKDIR}/logo-img.h ${S}/res/
diff --git a/recipes/kexecboot/kexecboot_git.bb b/recipes/kexecboot/kexecboot_git.bb
index 02e6d729ec..561706e9f6 100644
--- a/recipes/kexecboot/kexecboot_git.bb
+++ b/recipes/kexecboot/kexecboot_git.bb
@@ -1,8 +1,8 @@
PV = "0.5"
-PR = "r8+gitr${SRCREV}"
+PR = "r9+gitr${SRCREV}"
SRC_URI = "git://git.linuxtogo.org/home/groups/kexecboot/kexecboot.git;protocol=git "
-SRCREV = "456e052144f58d3c1116a38cbb5caa44c070bce8"
+SRCREV = "daa68b5aaa56a036c24c7929e239ebd5edf8fdfe"
S = "${WORKDIR}/git"
diff --git a/recipes/klibc/klibc-1.5.15/isystem.patch b/recipes/klibc/klibc-1.5.15/isystem.patch
new file mode 100644
index 0000000000..2ec40c16c2
--- /dev/null
+++ b/recipes/klibc/klibc-1.5.15/isystem.patch
@@ -0,0 +1,13 @@
+Index: klibc-1.5.15/klcc/klcc.in
+===================================================================
+--- klibc-1.5.15.orig/klcc/klcc.in 2009-12-14 00:32:41.373661102 +0100
++++ klibc-1.5.15/klcc/klcc.in 2009-12-14 00:34:20.855735356 +0100
+@@ -147,7 +147,7 @@
+ } elsif ( $a =~ /^-([fmwWQdO]|std=|ansi|pedantic|M[GPD]|MMD)/ ) {
+ # Options to gcc
+ push(@ccopt, $a);
+- } elsif ( $a =~ /^-([DUI]|M[FQT])(.*)$/ ) {
++ } elsif ( $a =~ /^-([DUI]|M[FQT]|isystem)(.*)$/ ) {
+ # Options to gcc, which can take either a conjoined argument
+ # (-DFOO) or a disjoint argument (-D FOO)
+ push(@ccopt, $a);
diff --git a/recipes/klibc/klibc_1.5.15.bb b/recipes/klibc/klibc_1.5.15.bb
index 777dcd2c3c..97898f2b8e 100644
--- a/recipes/klibc/klibc_1.5.15.bb
+++ b/recipes/klibc/klibc_1.5.15.bb
@@ -1,4 +1,4 @@
require klibc_1.5.15.inc
-PR = "r4"
+PR = "r5"
KLIBC_FETCHDIR = "Testing"
diff --git a/recipes/klibc/klibc_1.5.15.inc b/recipes/klibc/klibc_1.5.15.inc
index f6f5b879ef..6615ac2899 100644
--- a/recipes/klibc/klibc_1.5.15.inc
+++ b/recipes/klibc/klibc_1.5.15.inc
@@ -4,7 +4,8 @@ SRC_URI += "file://staging.patch;patch=1 \
file://klibc_kexecsyscall.patch;patch=1 \
file://mntproc-definitions.patch;patch=1 \
file://signal-cleanup.patch;patch=1 \
- "
+ file://isystem.patch;patch=1 \
+ "
# we want only the shared programms and the lib so we chose them manually
do_install() {
diff --git a/recipes/linux-libc-headers/linux-libc-headers-2.6.32/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch b/recipes/linux-libc-headers/linux-libc-headers-2.6.32/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
new file mode 100644
index 0000000000..1dee314700
--- /dev/null
+++ b/recipes/linux-libc-headers/linux-libc-headers-2.6.32/0001-implement-TIF_RESTORE_SIGMASK-support-and-enable-the.patch
@@ -0,0 +1,429 @@
+Path: news.gmane.org!not-for-mail
+From: Mikael Pettersson <mikpe@it.uu.se>
+Newsgroups: gmane.linux.ports.arm.kernel
+Subject: [PATCH][v3] ARM support for
+ TIF_RESTORE_SIGMASK/pselect6/ppoll/epoll_pwait
+Date: Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+Lines: 362
+Approved: news@gmane.org
+Message-ID: <200908151109.n7FB9Sbs000150@pilspetsen.it.uu.se>
+NNTP-Posting-Host: lo.gmane.org
+Mime-Version: 1.0
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+X-Trace: ger.gmane.org 1250334940 22899 80.91.229.12 (15 Aug 2009 11:15:40 GMT)
+X-Complaints-To: usenet@ger.gmane.org
+NNTP-Posting-Date: Sat, 15 Aug 2009 11:15:40 +0000 (UTC)
+To: linux-arm-kernel@lists.arm.linux.org.uk
+Original-X-From: linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk Sat Aug 15 13:15:33 2009
+Return-path: <linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk>
+Envelope-to: linux-arm-kernel@m.gmane.org
+Original-Received: from zeniv.linux.org.uk ([195.92.253.2])
+ by lo.gmane.org with esmtp (Exim 4.50)
+ id 1McHER-0000U2-PG
+ for linux-arm-kernel@m.gmane.org; Sat, 15 Aug 2009 13:15:31 +0200
+Original-Received: from [2002:4e20:1eda:1:201:3dff:fe00:156] (helo=lists.arm.linux.org.uk)
+ by ZenIV.linux.org.uk with esmtpsa (Exim 4.69 #1 (Red Hat Linux))
+ id 1McH9u-0005LV-3W; Sat, 15 Aug 2009 11:10:57 +0000
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+ d=arm.linux.org.uk; s=lists; h=Date:Message-Id:From:To:Subject:
+ List-Id:List-Unsubscribe:List-Archive:List-Post:List-Help:
+ List-Subscribe:MIME-Version:Content-Type:
+ Content-Transfer-Encoding; bh=RVq9hPAexlUcptd3h6j5nomOyoEIjcDWAP
+ VeJtlhsa4=; b=HAoqRMC9svNzKlR0f938E0cRlLoJZvG2aSun171woWZAClu/Ud
+ IYN0iD2vm4eF+kYQkTYD6PGFCMbNnugmZe6gCpS0QhijoKyOzRBdykQHJuOLQtLE
+ q9KJzp24cQ/vwsV2+O1OqcmIFboIg2L5JQaUuv0djS7OkdspH0ysmi1lg=
+Original-Received: from [::1] (helo=lists.arm.linux.org.uk)
+ by lists.arm.linux.org.uk with esmtp (Exim 4.69)
+ (envelope-from <linux-arm-kernel-bounces@lists.arm.linux.org.uk>)
+ id 1McH9o-0002us-34; Sat, 15 Aug 2009 12:10:44 +0100
+Original-Received: from aun.it.uu.se ([130.238.12.36] ident=root)
+ by lists.arm.linux.org.uk with esmtps (TLSv1:AES256-SHA:256)
+ (Exim 4.69) (envelope-from <mikpe@user.it.uu.se>) id 1McH8h-0002uj-Ck
+ for linux-arm-kernel@lists.arm.linux.org.uk;
+ Sat, 15 Aug 2009 12:09:35 +0100
+Original-Received: from pilspetsen.it.uu.se (daemon@pilspetsen.it.uu.se [130.238.18.39])
+ by aun.it.uu.se (8.13.6/8.13.6) with ESMTP id n7FB9S04005839
+ for <linux-arm-kernel@lists.arm.linux.org.uk>;
+ Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+Original-Received: (from mikpe@localhost)
+ by pilspetsen.it.uu.se (8.13.8+Sun/8.13.7) id n7FB9Sbs000150
+ for linux-arm-kernel@lists.arm.linux.org.uk;
+ Sat, 15 Aug 2009 13:09:28 +0200 (MEST)
+X-BeenThere: linux-arm-kernel@lists.arm.linux.org.uk
+X-Mailman-Version: 2.1.9
+Precedence: list
+List-Id: ARM Linux kernel discussions <linux-arm-kernel.lists.arm.linux.org.uk>
+List-Unsubscribe: <http://lists.arm.linux.org.uk/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=unsubscribe>
+List-Archive: <http://lists.arm.linux.org.uk/lurker/list/linux-arm-kernel.html>
+List-Post: <mailto:linux-arm-kernel@lists.arm.linux.org.uk>
+List-Help: <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=help>
+List-Subscribe: <http://lists.arm.linux.org.uk/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.arm.linux.org.uk?subject=subscribe>
+Errors-To: linux-arm-kernel-bounces+linux-arm-kernel=m.gmane.org@lists.arm.linux.org.uk
+Xref: news.gmane.org gmane.linux.ports.arm.kernel:64340
+Archived-At: <http://permalink.gmane.org/gmane.linux.ports.arm.kernel/64340>
+
+This patch adds support for TIF_RESTORE_SIGMASK to ARM's signal handling,
+which allows to hook up the pselect6, ppoll, and epoll_pwait syscalls on ARM.
+
+Tested here with eabi userspace and a test program with a deliberate race
+between a child's exit and the parent's sigprocmask/select sequence.
+Using sys_pselect6() instead of sigprocmask/select reliably prevents the race.
+
+The other arch's support for TIF_RESTORE_SIGMASK has evolved over time:
+
+In 2.6.16:
+- add TIF_RESTORE_SIGMASK which parallels TIF_SIGPENDING
+- test both when checking for pending signal [bad, changed later]
+- reimplement sys_sigsuspend() to use current->saved_sigmask,
+ TIF_RESTORE_SIGMASK [changed later], and -ERESTARTNOHAND;
+ ditto for sys_rt_sigsuspend(), but drop private code and
+ use common code via __ARCH_WANT_SYS_RT_SIGSUSPEND;
+- there are now no "extra" calls to do_signal() so its oldset
+ parameter is always &current->blocked so need not be passed,
+ also its return value is changed to void
+- change handle_signal() to return 0/-errno
+- change do_signal() to honor TIF_RESTORE_SIGMASK:
+ + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set
+ + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK
+ + if no signal was delivered and TIF_RESTORE_SIGMASK is set then
+ clear it and restore the sigmask
+- hook up sys_pselect6() and sys_ppoll()
+
+In 2.6.19:
+- hook up sys_epoll_pwait()
+
+In 2.6.26:
+- allow archs to override how TIF_RESTORE_SIGMASK is implemented;
+ default set_restore_sigmask() sets both TIF_RESTORE_SIGMASK and
+ TIF_SIGPENDING; archs need now just test TIF_SIGPENDING again
+ when checking for pending signal work; some archs now implement
+ TIF_RESTORE_SIGMASK as a secondary/non-atomic thread flag bit
+- call set_restore_sigmask() in sys_sigsuspend() instead of setting
+ TIF_RESTORE_SIGMASK
+
+In 2.6.29-rc:
+- kill sys_pselect7() which no arch wanted
+
+So for 2.6.31-rc6/ARM this patch does the following:
+- Add TIF_RESTORE_SIGMASK. Use the generic set_restore_sigmask()
+ which sets both TIF_SIGPENDING and TIF_RESTORE_SIGMASK, so
+ TIF_RESTORE_SIGMASK need not claim one of the scarce low thread
+ flags, and existing TIF_SIGPENDING and _TIF_WORK_MASK tests need
+ not be extended for TIF_RESTORE_SIGMASK.
+- sys_sigsuspend() is reimplemented to use current->saved_sigmask and
+ set_restore_sigmask(), making it identical to most other archs.
+- The private code for sys_rt_sigsuspend() is removed, instead generic
+ code supplies it via __ARCH_WANT_SYS_RT_SIGSUSPEND.
+- sys_sigsuspend() and sys_rt_sigsuspend() no longer need a pt_regs
+ parameter, so their assembly code wrappers are removed.
+- handle_signal() is changed to return 0 on success or -errno.
+- The oldset parameter to do_signal() is now redundant and removed,
+ and the return value is now also redundant so it's changed to void.
+- do_signal() is changed to honor TIF_RESTORE_SIGMASK:
+ + get oldset from current->saved_sigmask if TIF_RESTORE_SIGMASK is set
+ + if handle_signal() was successful then clear TIF_RESTORE_SIGMASK
+ + if no signal was delivered and TIF_RESTORE_SIGMASK is set then
+ clear it and restore the sigmask
+- Hook up sys_pselect6, sys_ppoll, and sys_epoll_pwait.
+
+Signed-off-by: Mikael Pettersson
+---
+Changes from previous versions:
+
+v2 (only posted as incremental diff against v1):
+- moved sigmask restoration in no-signal-delivered case into
+ "if (syscall)" block for extra protection against exception
+ path also invoking do_signal()
+v3 (this version):
+- moved oldset initialisation into "if (signr > 0)" block,
+ as requested by Russell
+
+ arch/arm/include/asm/thread_info.h | 2
+ arch/arm/include/asm/unistd.h | 7 +--
+ arch/arm/kernel/calls.S | 10 ++--
+ arch/arm/kernel/entry-common.S | 10 ----
+ arch/arm/kernel/signal.c | 86 +++++++++++++++----------------------
+ 5 files changed, 48 insertions(+), 67 deletions(-)
+
+diff -rupN linux-2.6.31-rc6/arch/arm/include/asm/thread_info.h linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/thread_info.h
+--- linux-2.6.31-rc6/arch/arm/include/asm/thread_info.h 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/thread_info.h 2009-08-14 11:26:44.000000000 +0200
+@@ -140,6 +140,7 @@ extern void vfp_sync_state(struct thread
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18
+ #define TIF_FREEZE 19
++#define TIF_RESTORE_SIGMASK 20
+
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+@@ -147,6 +148,7 @@ extern void vfp_sync_state(struct thread
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+ #define _TIF_FREEZE (1 << TIF_FREEZE)
++#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
+diff -rupN linux-2.6.31-rc6/arch/arm/include/asm/unistd.h linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/unistd.h
+--- linux-2.6.31-rc6/arch/arm/include/asm/unistd.h 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/include/asm/unistd.h 2009-08-14 11:26:44.000000000 +0200
+@@ -360,8 +360,8 @@
+ #define __NR_readlinkat (__NR_SYSCALL_BASE+332)
+ #define __NR_fchmodat (__NR_SYSCALL_BASE+333)
+ #define __NR_faccessat (__NR_SYSCALL_BASE+334)
+- /* 335 for pselect6 */
+- /* 336 for ppoll */
++#define __NR_pselect6 (__NR_SYSCALL_BASE+335)
++#define __NR_ppoll (__NR_SYSCALL_BASE+336)
+ #define __NR_unshare (__NR_SYSCALL_BASE+337)
+ #define __NR_set_robust_list (__NR_SYSCALL_BASE+338)
+ #define __NR_get_robust_list (__NR_SYSCALL_BASE+339)
+@@ -372,7 +372,7 @@
+ #define __NR_vmsplice (__NR_SYSCALL_BASE+343)
+ #define __NR_move_pages (__NR_SYSCALL_BASE+344)
+ #define __NR_getcpu (__NR_SYSCALL_BASE+345)
+- /* 346 for epoll_pwait */
++#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346)
+ #define __NR_kexec_load (__NR_SYSCALL_BASE+347)
+ #define __NR_utimensat (__NR_SYSCALL_BASE+348)
+ #define __NR_signalfd (__NR_SYSCALL_BASE+349)
+@@ -432,6 +432,7 @@
+ #define __ARCH_WANT_SYS_SIGPENDING
+ #define __ARCH_WANT_SYS_SIGPROCMASK
+ #define __ARCH_WANT_SYS_RT_SIGACTION
++#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
+ #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+ #define __ARCH_WANT_SYS_TIME
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/calls.S linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/calls.S
+--- linux-2.6.31-rc6/arch/arm/kernel/calls.S 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/calls.S 2009-08-14 11:26:44.000000000 +0200
+@@ -81,7 +81,7 @@
+ CALL(sys_ni_syscall) /* was sys_ssetmask */
+ /* 70 */ CALL(sys_setreuid16)
+ CALL(sys_setregid16)
+- CALL(sys_sigsuspend_wrapper)
++ CALL(sys_sigsuspend)
+ CALL(sys_sigpending)
+ CALL(sys_sethostname)
+ /* 75 */ CALL(sys_setrlimit)
+@@ -188,7 +188,7 @@
+ CALL(sys_rt_sigpending)
+ CALL(sys_rt_sigtimedwait)
+ CALL(sys_rt_sigqueueinfo)
+- CALL(sys_rt_sigsuspend_wrapper)
++ CALL(sys_rt_sigsuspend)
+ /* 180 */ CALL(ABI(sys_pread64, sys_oabi_pread64))
+ CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
+ CALL(sys_chown16)
+@@ -344,8 +344,8 @@
+ CALL(sys_readlinkat)
+ CALL(sys_fchmodat)
+ CALL(sys_faccessat)
+-/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
+- CALL(sys_ni_syscall) /* eventually ppoll */
++/* 335 */ CALL(sys_pselect6)
++ CALL(sys_ppoll)
+ CALL(sys_unshare)
+ CALL(sys_set_robust_list)
+ CALL(sys_get_robust_list)
+@@ -355,7 +355,7 @@
+ CALL(sys_vmsplice)
+ CALL(sys_move_pages)
+ /* 345 */ CALL(sys_getcpu)
+- CALL(sys_ni_syscall) /* eventually epoll_pwait */
++ CALL(sys_epoll_pwait)
+ CALL(sys_kexec_load)
+ CALL(sys_utimensat)
+ CALL(sys_signalfd)
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/entry-common.S linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/entry-common.S
+--- linux-2.6.31-rc6/arch/arm/kernel/entry-common.S 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/entry-common.S 2009-08-14 11:26:44.000000000 +0200
+@@ -373,16 +373,6 @@ sys_clone_wrapper:
+ b sys_clone
+ ENDPROC(sys_clone_wrapper)
+
+-sys_sigsuspend_wrapper:
+- add r3, sp, #S_OFF
+- b sys_sigsuspend
+-ENDPROC(sys_sigsuspend_wrapper)
+-
+-sys_rt_sigsuspend_wrapper:
+- add r2, sp, #S_OFF
+- b sys_rt_sigsuspend
+-ENDPROC(sys_rt_sigsuspend_wrapper)
+-
+ sys_sigreturn_wrapper:
+ add r0, sp, #S_OFF
+ b sys_sigreturn
+diff -rupN linux-2.6.31-rc6/arch/arm/kernel/signal.c linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/signal.c
+--- linux-2.6.31-rc6/arch/arm/kernel/signal.c 2009-08-14 11:11:10.000000000 +0200
++++ linux-2.6.31-rc6.arm-restore-sigmask-v3/arch/arm/kernel/signal.c 2009-08-14 11:26:44.000000000 +0200
+@@ -47,57 +47,22 @@ const unsigned long sigreturn_codes[7] =
+ MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
+ };
+
+-static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
+-
+ /*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+-asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
++asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
+ {
+- sigset_t saveset;
+-
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
++ current->saved_sigmask = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+- regs->ARM_r0 = -EINTR;
+-
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(&saveset, regs, 0))
+- return regs->ARM_r0;
+- }
+-}
+-
+-asmlinkage int
+-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+-{
+- sigset_t saveset, newset;
+-
+- /* XXX: Don't preclude handling different sized sigset_t's. */
+- if (sigsetsize != sizeof(sigset_t))
+- return -EINVAL;
+-
+- if (copy_from_user(&newset, unewset, sizeof(newset)))
+- return -EFAULT;
+- sigdelsetmask(&newset, ~_BLOCKABLE);
+-
+- spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
+- current->blocked = newset;
+- recalc_sigpending();
+- spin_unlock_irq(&current->sighand->siglock);
+- regs->ARM_r0 = -EINTR;
+
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(&saveset, regs, 0))
+- return regs->ARM_r0;
+- }
++ current->state = TASK_INTERRUPTIBLE;
++ schedule();
++ set_restore_sigmask();
++ return -ERESTARTNOHAND;
+ }
+
+ asmlinkage int
+@@ -545,7 +510,7 @@ static inline void setup_syscall_restart
+ /*
+ * OK, we're invoking a handler
+ */
+-static void
++static int
+ handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs * regs, int syscall)
+@@ -596,7 +561,7 @@ handle_signal(unsigned long sig, struct
+
+ if (ret != 0) {
+ force_sigsegv(sig, tsk);
+- return;
++ return ret;
+ }
+
+ /*
+@@ -610,6 +575,7 @@ handle_signal(unsigned long sig, struct
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
+
++ return 0;
+ }
+
+ /*
+@@ -621,7 +587,7 @@ handle_signal(unsigned long sig, struct
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+-static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
++static void do_signal(struct pt_regs *regs, int syscall)
+ {
+ struct k_sigaction ka;
+ siginfo_t info;
+@@ -634,7 +600,7 @@ static int do_signal(sigset_t *oldset, s
+ * if so.
+ */
+ if (!user_mode(regs))
+- return 0;
++ return;
+
+ if (try_to_freeze())
+ goto no_signal;
+@@ -643,9 +609,24 @@ static int do_signal(sigset_t *oldset, s
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+- handle_signal(signr, &ka, &info, oldset, regs, syscall);
++ sigset_t *oldset;
++
++ if (test_thread_flag(TIF_RESTORE_SIGMASK))
++ oldset = &current->saved_sigmask;
++ else
++ oldset = &current->blocked;
++ if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
++ /*
++ * A signal was successfully delivered; the saved
++ * sigmask will have been stored in the signal frame,
++ * and will be restored by sigreturn, so we can simply
++ * clear the TIF_RESTORE_SIGMASK flag.
++ */
++ if (test_thread_flag(TIF_RESTORE_SIGMASK))
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ }
+ single_step_set(current);
+- return 1;
++ return;
+ }
+
+ no_signal:
+@@ -697,14 +678,21 @@ static int do_signal(sigset_t *oldset, s
+ regs->ARM_r0 == -ERESTARTNOINTR) {
+ setup_syscall_restart(regs);
+ }
++
++ /* If there's no signal to deliver, we just put the saved sigmask
++ * back.
++ */
++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++ }
+ }
+ single_step_set(current);
+- return 0;
+ }
+
+ asmlinkage void
+ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ if (thread_flags & _TIF_SIGPENDING)
+- do_signal(&current->blocked, regs, syscall);
++ do_signal(regs, syscall);
+ }
+
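
The patch above switches ARM's sigsuspend and rt_sigsuspend to the TIF_RESTORE_SIGMASK scheme and wires up pselect6, ppoll and epoll_pwait, which rely on it. A minimal userspace sketch of the race-free wait those syscalls provide (illustrative only, not part of the patch; assumes a glibc that exposes ppoll()):

/* Block SIGINT in normal operation, but allow it to interrupt the wait:
 * ppoll() swaps in the temporary mask atomically, which is exactly the
 * restore-sigmask support the kernel patch provides on ARM. */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };
	sigset_t blocked, during_wait;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGINT);
	sigprocmask(SIG_BLOCK, &blocked, NULL);

	/* Empty mask: every signal is deliverable while we sleep in ppoll(). */
	sigemptyset(&during_wait);

	if (ppoll(&pfd, 1, &timeout, &during_wait) < 0)
		perror("ppoll");
	else if (pfd.revents & POLLIN)
		printf("stdin is readable\n");
	else
		printf("timed out\n");
	return 0;
}
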
diff --git a/recipes/linux-libc-headers/linux-libc-headers_2.6.32.bb b/recipes/linux-libc-headers/linux-libc-headers_2.6.32.bb
new file mode 100644
index 0000000000..6e1fdb0996
--- /dev/null
+++ b/recipes/linux-libc-headers/linux-libc-headers_2.6.32.bb
@@ -0,0 +1,50 @@
+require linux-libc-headers.inc
+
+INHIBIT_DEFAULT_DEPS = "1"
+DEPENDS += "unifdef-native"
+PR = "r0"
+
+DEFAULT_PREFERENCE = "-1"
+
+SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ "
+S = "${WORKDIR}/linux-${PV}"
+
+set_arch() {
+ case ${TARGET_ARCH} in
+ alpha*) ARCH=alpha ;;
+ arm*) ARCH=arm ;;
+ cris*) ARCH=cris ;;
+ hppa*) ARCH=parisc ;;
+ i*86*) ARCH=i386 ;;
+ ia64*) ARCH=ia64 ;;
+ mips*) ARCH=mips ;;
+ m68k*) ARCH=m68k ;;
+ powerpc*) ARCH=powerpc ;;
+ s390*) ARCH=s390 ;;
+ sh*) ARCH=sh ;;
+ sparc64*) ARCH=sparc64 ;;
+ sparc*) ARCH=sparc ;;
+ x86_64*) ARCH=x86_64 ;;
+ avr32*) ARCH=avr32 ;;
+ bfin*) ARCH=blackfin ;;
+ esac
+}
+
+do_configure() {
+ set_arch
+ oe_runmake allnoconfig ARCH=$ARCH
+}
+
+do_compile () {
+}
+
+do_install() {
+ set_arch
+ oe_runmake headers_install INSTALL_HDR_PATH=${D}${exec_prefix} ARCH=$ARCH
+}
+
+do_stage () {
+ set_arch
+ oe_runmake headers_install INSTALL_HDR_PATH=${STAGING_DIR_HOST}${layout_prefix} ARCH=$ARCH
+}
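
The new recipe configures with allnoconfig and installs the sanitized userspace headers via "make headers_install", deriving ARCH from TARGET_ARCH in set_arch(). A trivial consumer of the exported headers (illustrative only; assumes a toolchain whose sysroot was populated from this recipe or an equivalent):

/* Prints the kernel version the exported headers describe. */
#include <linux/version.h>
#include <stdio.h>

int main(void)
{
	printf("built against Linux headers %d.%d.%d\n",
	       (LINUX_VERSION_CODE >> 16) & 0xff,
	       (LINUX_VERSION_CODE >> 8) & 0xff,
	       LINUX_VERSION_CODE & 0xff);
	return 0;
}
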
diff --git a/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch b/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch
new file mode 100644
index 0000000000..4871601c97
--- /dev/null
+++ b/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch
@@ -0,0 +1,29 @@
+From fc31c7716355a226b8ed4e16f4581e5c8fa53570 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger <vapier@gentoo.org>
+Date: Thu, 17 May 2007 14:57:20 -0400
+Subject: [PATCH] kbuild: include limits.h in sumversion.c for PATH_MAX
+
+POSIX says limits.h defines PATH_MAX so we should include it (which fixes
+compiling on some systems like OS X).
+
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+---
+ scripts/mod/sumversion.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 6873d5a..d9cc690 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -7,6 +7,7 @@
+ #include <ctype.h>
+ #include <errno.h>
+ #include <string.h>
++#include <limits.h>
+ #include "modpost.h"
+
+ /*
+--
+1.6.3.3
+
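
The patch above adds <limits.h> to scripts/mod/sumversion.c because POSIX places PATH_MAX in that header. A minimal standalone illustration (not part of the patch):

/* PATH_MAX comes from <limits.h>; without the include, hosts such as
 * OS X fail to compile code that sizes buffers with it. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char path[PATH_MAX];

	snprintf(path, sizeof(path), "%s/%s", "/lib/modules", "modules.dep");
	printf("PATH_MAX = %d, example path: %s\n", PATH_MAX, path);
	return 0;
}
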
diff --git a/recipes/linux/linux-2.6.22.6/mx31moboard/defconfig b/recipes/linux/linux-2.6.22/mx31moboard/defconfig
index 848ec6b4ce..848ec6b4ce 100644
--- a/recipes/linux/linux-2.6.22.6/mx31moboard/defconfig
+++ b/recipes/linux/linux-2.6.22/mx31moboard/defconfig
diff --git a/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch b/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch
deleted file mode 100644
index a2f6e1765e..0000000000
--- a/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch
+++ /dev/null
@@ -1,19 +0,0 @@
----
- include/linux/time.h | 4 ++++
- 1 files changed, 4 insertions(+), 0 deletions(-)
-diff --git a/include/linux/time.h b/include/linux/time.h
-index 2091a19..d32ef0a 100644
---- a/include/linux/time.h
-+++ b/include/linux/time.h
-@@ -173,6 +173,10 @@ static inline void timespec_add_ns(struct timespec *a, u64 ns)
- {
- ns += a->tv_nsec;
- while(unlikely(ns >= NSEC_PER_SEC)) {
-+ /* The following asm() prevents the compiler from
-+ * optimising this loop into a modulo operation. */
-+ asm("" : "+r"(ns));
-+
- ns -= NSEC_PER_SEC;
- a->tv_sec++;
- }
-
diff --git a/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch b/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch
new file mode 100644
index 0000000000..77ee5c8f1d
--- /dev/null
+++ b/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch
@@ -0,0 +1,8567 @@
+---
+ Documentation/sched-design-CFS.txt | 67 +
+ Makefile | 2
+ arch/i386/Kconfig | 11
+ drivers/kvm/kvm.h | 10
+ fs/pipe.c | 9
+ fs/proc/array.c | 21
+ fs/proc/base.c | 2
+ fs/proc/proc_misc.c | 15
+ include/linux/cgroup.h | 12
+ include/linux/cpuset.h | 5
+ include/linux/kernel.h | 7
+ include/linux/kernel_stat.h | 3
+ include/linux/nodemask.h | 94 +
+ include/linux/sched.h | 174 ++
+ include/linux/taskstats.h | 7
+ include/linux/topology.h | 5
+ init/Kconfig | 26
+ init/main.c | 3
+ kernel/delayacct.c | 8
+ kernel/exit.c | 6
+ kernel/fork.c | 5
+ kernel/ksysfs.c | 8
+ kernel/sched.c | 2310 +++++++++++++++++++++++--------------
+ kernel/sched_debug.c | 289 +++-
+ kernel/sched_fair.c | 885 ++++++--------
+ kernel/sched_idletask.c | 26
+ kernel/sched_rt.c | 54
+ kernel/sched_stats.h | 40
+ kernel/sysctl.c | 40
+ kernel/timer.c | 7
+ kernel/tsacct.c | 4
+ kernel/user.c | 249 +++
+ mm/memory_hotplug.c | 7
+ mm/page_alloc.c | 50
+ mm/vmscan.c | 4
+ net/unix/af_unix.c | 4
+ 36 files changed, 2883 insertions(+), 1586 deletions(-)
+
+--- linux-2.6.23.orig/Documentation/sched-design-CFS.txt
++++ linux-2.6.23/Documentation/sched-design-CFS.txt
+@@ -115,5 +115,72 @@ Some implementation details:
+ - reworked/sanitized SMP load-balancing: the runqueue-walking
+ assumptions are gone from the load-balancing code now, and
+ iterators of the scheduling modules are used. The balancing code got
+ quite a bit simpler as a result.
+
++
++Group scheduler extension to CFS
++================================
++
++Normally the scheduler operates on individual tasks and strives to provide
++fair CPU time to each task. Sometimes, it may be desirable to group tasks
++and provide fair CPU time to each such task group. For example, it may
++be desirable to first provide fair CPU time to each user on the system
++and then to each task belonging to a user.
++
++CONFIG_FAIR_GROUP_SCHED strives to achieve exactly that. It lets
++SCHED_NORMAL/BATCH tasks be grouped and divides CPU time fairly among such
++groups. At present, there are two (mutually exclusive) mechanisms to group
++tasks for CPU bandwidth control purpose:
++
++ - Based on user id (CONFIG_FAIR_USER_SCHED)
++ In this option, tasks are grouped according to their user id.
++ - Based on "cgroup" pseudo filesystem (CONFIG_FAIR_CGROUP_SCHED)
++	  This option lets the administrator create arbitrary groups
++ of tasks, using the "cgroup" pseudo filesystem. See
++ Documentation/cgroups.txt for more information about this
++ filesystem.
++
++Only one of these options to group tasks can be chosen and not both.
++
++Group scheduler tunables:
++
++When CONFIG_FAIR_USER_SCHED is defined, a directory is created in sysfs for
++each new user and a "cpu_share" file is added in that directory.
++
++ # cd /sys/kernel/uids
++ # cat 512/cpu_share # Display user 512's CPU share
++ 1024
++ # echo 2048 > 512/cpu_share # Modify user 512's CPU share
++ # cat 512/cpu_share # Display user 512's CPU share
++ 2048
++ #
++
++CPU bandwidth between two users is divided in the ratio of their CPU shares.
++For example, if you would like user "root" to get twice the bandwidth of user
++"guest", then set the cpu_share for both users such that "root"'s
++cpu_share is twice "guest"'s cpu_share.
++
++
++When CONFIG_FAIR_CGROUP_SCHED is defined, a "cpu.shares" file is created
++for each group created using the pseudo filesystem. See example steps
++below to create task groups and modify their CPU share using the "cgroups"
++pseudo filesystem
++
++ # mkdir /dev/cpuctl
++ # mount -t cgroup -ocpu none /dev/cpuctl
++ # cd /dev/cpuctl
++
++ # mkdir multimedia # create "multimedia" group of tasks
++ # mkdir browser # create "browser" group of tasks
++
++ # #Configure the multimedia group to receive twice the CPU bandwidth
++ # #that of browser group
++
++ # echo 2048 > multimedia/cpu.shares
++ # echo 1024 > browser/cpu.shares
++
++ # firefox & # Launch firefox and move it to "browser" group
++ # echo <firefox_pid> > browser/tasks
++
++ # #Launch gmplayer (or your favourite movie player)
++ # echo <movie_player_pid> > multimedia/tasks
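
The documentation hunk above describes both the per-uid cpu_share files under /sys/kernel/uids (CONFIG_FAIR_USER_SCHED) and the per-group cpu.shares files (CONFIG_FAIR_CGROUP_SCHED). A minimal userspace sketch, illustrative only and not part of the patch, that sets a user's share through the sysfs interface (assumes a kernel built with CONFIG_FAIR_USER_SCHED and root privileges):

/* Writes a new share value to /sys/kernel/uids/<uid>/cpu_share. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[128];
	FILE *f;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <uid> <share>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/kernel/uids/%s/cpu_share", argv[1]);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%s\n", argv[2]);
	return fclose(f) ? 1 : 0;
}
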
+--- linux-2.6.23.orig/Makefile
++++ linux-2.6.23/Makefile
+@@ -1,9 +1,9 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 23
+-EXTRAVERSION = .17
++EXTRAVERSION = .17-cfs-v24.1
+ NAME = Arr Matey! A Hairy Bilge Rat!
+
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+ # More info can be located in ./README
+--- linux-2.6.23.orig/arch/i386/Kconfig
++++ linux-2.6.23/arch/i386/Kconfig
+@@ -212,10 +212,21 @@ config X86_ES7000
+ Only choose this option if you have such a system, otherwise you
+ should say N here.
+
+ endchoice
+
++config SCHED_NO_NO_OMIT_FRAME_POINTER
++ bool "Single-depth WCHAN output"
++ default y
++ help
++ Calculate simpler /proc/<PID>/wchan values. If this option
++ is disabled then wchan values will recurse back to the
++ caller function. This provides more accurate wchan values,
++ at the expense of slightly more scheduling overhead.
++
++ If in doubt, say "Y".
++
+ config PARAVIRT
+ bool "Paravirtualization support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
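
The help text added above only affects how deep the kernel walks the stack when reporting /proc/<PID>/wchan. A trivial reader of that file, illustrative only (assumes procfs is mounted):

/* Prints the parent's wait channel; the parent (usually a shell blocked
 * in wait) has a more interesting wchan than the running reader itself. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], wchan[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/wchan", (int)getppid());
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%127s", wchan) == 1)
		printf("parent wchan: %s\n", wchan);
	fclose(f);
	return 0;
}
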
+--- linux-2.6.23.orig/drivers/kvm/kvm.h
++++ linux-2.6.23/drivers/kvm/kvm.h
+@@ -623,10 +623,20 @@ void __kvm_mmu_free_some_pages(struct kv
+ int kvm_mmu_load(struct kvm_vcpu *vcpu);
+ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+
+ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
++static inline void kvm_guest_enter(void)
++{
++ current->flags |= PF_VCPU;
++}
++
++static inline void kvm_guest_exit(void)
++{
++ current->flags &= ~PF_VCPU;
++}
++
+ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+ u32 error_code)
+ {
+ return vcpu->mmu.page_fault(vcpu, gva, error_code);
+ }
+--- linux-2.6.23.orig/fs/pipe.c
++++ linux-2.6.23/fs/pipe.c
+@@ -43,12 +43,11 @@ void pipe_wait(struct pipe_inode_info *p
+
+ /*
+ * Pipes are system-local resources, so sleeping on them
+ * is considered a noninteractive wait:
+ */
+- prepare_to_wait(&pipe->wait, &wait,
+- TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
++ prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
+ if (pipe->inode)
+ mutex_unlock(&pipe->inode->i_mutex);
+ schedule();
+ finish_wait(&pipe->wait, &wait);
+ if (pipe->inode)
+@@ -381,11 +380,11 @@ redo:
+ }
+ mutex_unlock(&inode->i_mutex);
+
+ /* Signal writers asynchronously that there is more room. */
+ if (do_wakeup) {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+ if (ret > 0)
+ file_accessed(filp);
+ return ret;
+@@ -554,11 +553,11 @@ redo2:
+ pipe->waiting_writers--;
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+ if (do_wakeup) {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ }
+ if (ret > 0)
+ file_update_time(filp);
+ return ret;
+@@ -648,11 +647,11 @@ pipe_release(struct inode *inode, int de
+ pipe->writers -= decw;
+
+ if (!pipe->readers && !pipe->writers) {
+ free_pipe_info(inode);
+ } else {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+ mutex_unlock(&inode->i_mutex);
+
+--- linux-2.6.23.orig/fs/proc/array.c
++++ linux-2.6.23/fs/proc/array.c
+@@ -365,15 +365,22 @@ static cputime_t task_stime(struct task_
+ * grows monotonically - apps rely on that):
+ */
+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+ cputime_to_clock_t(task_utime(p));
+
+- p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++ if (stime >= 0)
++ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++
+ return p->prev_stime;
+ }
+ #endif
+
++static cputime_t task_gtime(struct task_struct *p)
++{
++ return p->gtime;
++}
++
+ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
+ {
+ unsigned long vsize, eip, esp, wchan = ~0UL;
+ long priority, nice;
+ int tty_pgrp = -1, tty_nr = 0;
+@@ -385,10 +392,11 @@ static int do_task_stat(struct task_stru
+ struct mm_struct *mm;
+ unsigned long long start_time;
+ unsigned long cmin_flt = 0, cmaj_flt = 0;
+ unsigned long min_flt = 0, maj_flt = 0;
+ cputime_t cutime, cstime, utime, stime;
++ cputime_t cgtime, gtime;
+ unsigned long rsslim = 0;
+ char tcomm[sizeof(task->comm)];
+ unsigned long flags;
+
+ state = *get_task_state(task);
+@@ -403,10 +411,11 @@ static int do_task_stat(struct task_stru
+ get_task_comm(tcomm, task);
+
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+ cutime = cstime = utime = stime = cputime_zero;
++ cgtime = gtime = cputime_zero;
+
+ rcu_read_lock();
+ if (lock_task_sighand(task, &flags)) {
+ struct signal_struct *sig = task->signal;
+
+@@ -420,27 +429,30 @@ static int do_task_stat(struct task_stru
+
+ cmin_flt = sig->cmin_flt;
+ cmaj_flt = sig->cmaj_flt;
+ cutime = sig->cutime;
+ cstime = sig->cstime;
++ cgtime = sig->cgtime;
+ rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;
+
+ /* add up live thread stats at the group level */
+ if (whole) {
+ struct task_struct *t = task;
+ do {
+ min_flt += t->min_flt;
+ maj_flt += t->maj_flt;
+ utime = cputime_add(utime, task_utime(t));
+ stime = cputime_add(stime, task_stime(t));
++ gtime = cputime_add(gtime, task_gtime(t));
+ t = next_thread(t);
+ } while (t != task);
+
+ min_flt += sig->min_flt;
+ maj_flt += sig->maj_flt;
+ utime = cputime_add(utime, sig->utime);
+ stime = cputime_add(stime, sig->stime);
++ gtime = cputime_add(gtime, sig->gtime);
+ }
+
+ sid = signal_session(sig);
+ pgid = process_group(task);
+ ppid = rcu_dereference(task->real_parent)->tgid;
+@@ -454,10 +466,11 @@ static int do_task_stat(struct task_stru
+ if (!whole) {
+ min_flt = task->min_flt;
+ maj_flt = task->maj_flt;
+ utime = task_utime(task);
+ stime = task_stime(task);
++ gtime = task_gtime(task);
+ }
+
+ /* scale priority and nice values from timeslices to -20..20 */
+ /* to make it look like a "normal" Unix priority/nice value */
+ priority = task_prio(task);
+@@ -471,11 +484,11 @@ static int do_task_stat(struct task_stru
+ /* convert nsec -> ticks */
+ start_time = nsec_to_clock_t(start_time);
+
+ res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \
+ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
+-%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu\n",
++%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
+ task->pid,
+ tcomm,
+ state,
+ ppid,
+ pgid,
+@@ -516,11 +529,13 @@ static int do_task_stat(struct task_stru
+ 0UL,
+ task->exit_signal,
+ task_cpu(task),
+ task->rt_priority,
+ task->policy,
+- (unsigned long long)delayacct_blkio_ticks(task));
++ (unsigned long long)delayacct_blkio_ticks(task),
++ cputime_to_clock_t(gtime),
++ cputime_to_clock_t(cgtime));
+ if (mm)
+ mmput(mm);
+ return res;
+ }
+
+--- linux-2.6.23.orig/fs/proc/base.c
++++ linux-2.6.23/fs/proc/base.c
+@@ -302,11 +302,11 @@ static int proc_pid_wchan(struct task_st
+ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
+ {
+ return sprintf(buffer, "%llu %llu %lu\n",
+ task->sched_info.cpu_time,
+ task->sched_info.run_delay,
+- task->sched_info.pcnt);
++ task->sched_info.pcount);
+ }
+ #endif
+
+ /* The badness from the OOM killer */
+ unsigned long badness(struct task_struct *p, unsigned long uptime);
+--- linux-2.6.23.orig/fs/proc/proc_misc.c
++++ linux-2.6.23/fs/proc/proc_misc.c
+@@ -441,20 +441,22 @@ static const struct file_operations proc
+ static int show_stat(struct seq_file *p, void *v)
+ {
+ int i;
+ unsigned long jif;
+ cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
++ cputime64_t guest;
+ u64 sum = 0;
+ struct timespec boottime;
+ unsigned int *per_irq_sum;
+
+ per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
+ if (!per_irq_sum)
+ return -ENOMEM;
+
+ user = nice = system = idle = iowait =
+ irq = softirq = steal = cputime64_zero;
++ guest = cputime64_zero;
+ getboottime(&boottime);
+ jif = boottime.tv_sec;
+
+ for_each_possible_cpu(i) {
+ int j;
+@@ -465,26 +467,28 @@ static int show_stat(struct seq_file *p,
+ idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
+ iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
+ irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
+ softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
+ steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
++ guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+ for (j = 0; j < NR_IRQS; j++) {
+ unsigned int temp = kstat_cpu(i).irqs[j];
+ sum += temp;
+ per_irq_sum[j] += temp;
+ }
+ }
+
+- seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
++ seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ (unsigned long long)cputime64_to_clock_t(user),
+ (unsigned long long)cputime64_to_clock_t(nice),
+ (unsigned long long)cputime64_to_clock_t(system),
+ (unsigned long long)cputime64_to_clock_t(idle),
+ (unsigned long long)cputime64_to_clock_t(iowait),
+ (unsigned long long)cputime64_to_clock_t(irq),
+ (unsigned long long)cputime64_to_clock_t(softirq),
+- (unsigned long long)cputime64_to_clock_t(steal));
++ (unsigned long long)cputime64_to_clock_t(steal),
++ (unsigned long long)cputime64_to_clock_t(guest));
+ for_each_online_cpu(i) {
+
+ /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
+ user = kstat_cpu(i).cpustat.user;
+ nice = kstat_cpu(i).cpustat.nice;
+@@ -492,20 +496,23 @@ static int show_stat(struct seq_file *p,
+ idle = kstat_cpu(i).cpustat.idle;
+ iowait = kstat_cpu(i).cpustat.iowait;
+ irq = kstat_cpu(i).cpustat.irq;
+ softirq = kstat_cpu(i).cpustat.softirq;
+ steal = kstat_cpu(i).cpustat.steal;
+- seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
++ guest = kstat_cpu(i).cpustat.guest;
++ seq_printf(p,
++ "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ i,
+ (unsigned long long)cputime64_to_clock_t(user),
+ (unsigned long long)cputime64_to_clock_t(nice),
+ (unsigned long long)cputime64_to_clock_t(system),
+ (unsigned long long)cputime64_to_clock_t(idle),
+ (unsigned long long)cputime64_to_clock_t(iowait),
+ (unsigned long long)cputime64_to_clock_t(irq),
+ (unsigned long long)cputime64_to_clock_t(softirq),
+- (unsigned long long)cputime64_to_clock_t(steal));
++ (unsigned long long)cputime64_to_clock_t(steal),
++ (unsigned long long)cputime64_to_clock_t(guest));
+ }
+ seq_printf(p, "intr %llu", (unsigned long long)sum);
+
+ #ifndef CONFIG_SMP
+ /* Touches too many cache lines on SMP setups */
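
The show_stat() changes above append a ninth "guest" column to each cpu line in /proc/stat. A small reader that tolerates kernels without the field, illustrative only:

/* Reads the aggregate "cpu" line and prints the trailing guest field;
 * unpatched kernels print only eight values, so the sscanf count is checked. */
#include <stdio.h>

int main(void)
{
	unsigned long long v[9];
	char line[512];
	FILE *f = fopen("/proc/stat", "r");

	if (!f || !fgets(line, sizeof(line), f)) {
		perror("/proc/stat");
		return 1;
	}
	fclose(f);

	if (sscanf(line, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
		   &v[8]) == 9)
		printf("guest time: %llu ticks\n", v[8]);
	else
		printf("no guest field in this kernel's /proc/stat\n");
	return 0;
}
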
+--- /dev/null
++++ linux-2.6.23/include/linux/cgroup.h
+@@ -0,0 +1,12 @@
++#ifndef _LINUX_CGROUP_H
++#define _LINUX_CGROUP_H
++
++/*
++ * Control groups are not backported - we use a few compatibility
++ * defines to be able to use the upstream sched.c as-is:
++ */
++#define task_pid_nr(task) (task)->pid
++#define task_pid_vnr(task) (task)->pid
++#define find_task_by_vpid(pid) find_task_by_pid(pid)
++
++#endif
+--- linux-2.6.23.orig/include/linux/cpuset.h
++++ linux-2.6.23/include/linux/cpuset.h
+@@ -144,8 +144,13 @@ static inline int cpuset_do_slab_mem_spr
+ return 0;
+ }
+
+ static inline void cpuset_track_online_nodes(void) {}
+
++static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
++{
++ return cpu_possible_map;
++}
++
+ #endif /* !CONFIG_CPUSETS */
+
+ #endif /* _LINUX_CPUSET_H */
+--- linux-2.6.23.orig/include/linux/kernel.h
++++ linux-2.6.23/include/linux/kernel.h
+@@ -59,10 +59,17 @@ extern const char linux_proc_banner[];
+ #define KERN_WARNING "<4>" /* warning conditions */
+ #define KERN_NOTICE "<5>" /* normal but significant condition */
+ #define KERN_INFO "<6>" /* informational */
+ #define KERN_DEBUG "<7>" /* debug-level messages */
+
++/*
++ * Annotation for a "continued" line of log printout (only done after a
++ * line that had no enclosing \n). Only to be used by core/arch code
++ * during early bootup (a continued line is not SMP-safe otherwise).
++ */
++#define KERN_CONT ""
++
+ extern int console_printk[];
+
+ #define console_loglevel (console_printk[0])
+ #define default_message_loglevel (console_printk[1])
+ #define minimum_console_loglevel (console_printk[2])
+--- linux-2.6.23.orig/include/linux/kernel_stat.h
++++ linux-2.6.23/include/linux/kernel_stat.h
+@@ -21,10 +21,11 @@ struct cpu_usage_stat {
+ cputime64_t softirq;
+ cputime64_t irq;
+ cputime64_t idle;
+ cputime64_t iowait;
+ cputime64_t steal;
++ cputime64_t guest;
+ };
+
+ struct kernel_stat {
+ struct cpu_usage_stat cpustat;
+ unsigned int irqs[NR_IRQS];
+@@ -50,9 +51,11 @@ static inline int kstat_irqs(int irq)
+
+ return sum;
+ }
+
+ extern void account_user_time(struct task_struct *, cputime_t);
++extern void account_user_time_scaled(struct task_struct *, cputime_t);
+ extern void account_system_time(struct task_struct *, int, cputime_t);
++extern void account_system_time_scaled(struct task_struct *, cputime_t);
+ extern void account_steal_time(struct task_struct *, cputime_t);
+
+ #endif /* _LINUX_KERNEL_STAT_H */
+--- linux-2.6.23.orig/include/linux/nodemask.h
++++ linux-2.6.23/include/linux/nodemask.h
+@@ -336,46 +336,108 @@ static inline void __nodes_remap(nodemas
+ if (!nodes_empty(mask)) \
+ for ((node) = 0; (node) < 1; (node)++)
+ #endif /* MAX_NUMNODES */
+
+ /*
++ * Bitmasks that are kept for all the nodes.
++ */
++enum node_states {
++ N_POSSIBLE, /* The node could become online at some point */
++ N_ONLINE, /* The node is online */
++ N_NORMAL_MEMORY, /* The node has regular memory */
++#ifdef CONFIG_HIGHMEM
++ N_HIGH_MEMORY, /* The node has regular or high memory */
++#else
++ N_HIGH_MEMORY = N_NORMAL_MEMORY,
++#endif
++ N_CPU, /* The node has one or more cpus */
++ NR_NODE_STATES
++};
++
++/*
+ * The following particular system nodemasks and operations
+ * on them manage all possible and online nodes.
+ */
+
+-extern nodemask_t node_online_map;
+-extern nodemask_t node_possible_map;
++extern nodemask_t node_states[NR_NODE_STATES];
+
+ #if MAX_NUMNODES > 1
+-#define num_online_nodes() nodes_weight(node_online_map)
+-#define num_possible_nodes() nodes_weight(node_possible_map)
+-#define node_online(node) node_isset((node), node_online_map)
+-#define node_possible(node) node_isset((node), node_possible_map)
+-#define first_online_node first_node(node_online_map)
+-#define next_online_node(nid) next_node((nid), node_online_map)
++static inline int node_state(int node, enum node_states state)
++{
++ return node_isset(node, node_states[state]);
++}
++
++static inline void node_set_state(int node, enum node_states state)
++{
++ __node_set(node, &node_states[state]);
++}
++
++static inline void node_clear_state(int node, enum node_states state)
++{
++ __node_clear(node, &node_states[state]);
++}
++
++static inline int num_node_state(enum node_states state)
++{
++ return nodes_weight(node_states[state]);
++}
++
++#define for_each_node_state(__node, __state) \
++ for_each_node_mask((__node), node_states[__state])
++
++#define first_online_node first_node(node_states[N_ONLINE])
++#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
++
+ extern int nr_node_ids;
+ #else
+-#define num_online_nodes() 1
+-#define num_possible_nodes() 1
+-#define node_online(node) ((node) == 0)
+-#define node_possible(node) ((node) == 0)
++
++static inline int node_state(int node, enum node_states state)
++{
++ return node == 0;
++}
++
++static inline void node_set_state(int node, enum node_states state)
++{
++}
++
++static inline void node_clear_state(int node, enum node_states state)
++{
++}
++
++static inline int num_node_state(enum node_states state)
++{
++ return 1;
++}
++
++#define for_each_node_state(node, __state) \
++ for ( (node) = 0; (node) == 0; (node) = 1)
++
+ #define first_online_node 0
+ #define next_online_node(nid) (MAX_NUMNODES)
+ #define nr_node_ids 1
++
+ #endif
+
++#define node_online_map node_states[N_ONLINE]
++#define node_possible_map node_states[N_POSSIBLE]
++
+ #define any_online_node(mask) \
+ ({ \
+ int node; \
+ for_each_node_mask(node, (mask)) \
+ if (node_online(node)) \
+ break; \
+ node; \
+ })
+
+-#define node_set_online(node) set_bit((node), node_online_map.bits)
+-#define node_set_offline(node) clear_bit((node), node_online_map.bits)
++#define num_online_nodes() num_node_state(N_ONLINE)
++#define num_possible_nodes() num_node_state(N_POSSIBLE)
++#define node_online(node) node_state((node), N_ONLINE)
++#define node_possible(node) node_state((node), N_POSSIBLE)
++
++#define node_set_online(node) node_set_state((node), N_ONLINE)
++#define node_set_offline(node) node_clear_state((node), N_ONLINE)
+
+-#define for_each_node(node) for_each_node_mask((node), node_possible_map)
+-#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
++#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
++#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+
+ #endif /* __LINUX_NODEMASK_H */
+--- linux-2.6.23.orig/include/linux/sched.h
++++ linux-2.6.23/include/linux/sched.h
+@@ -1,10 +1,21 @@
+ #ifndef _LINUX_SCHED_H
+ #define _LINUX_SCHED_H
+
+ #include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
+
++/* backporting helper macro: */
++#define cpu_sibling_map(cpu) cpu_sibling_map[cpu]
++
++/*
++ * Control groups are not backported - we use a few compatibility
++ * defines to be able to use the upstream sched.c as-is:
++ */
++#define task_pid_nr(task) (task)->pid
++#define task_pid_vnr(task) (task)->pid
++#define find_task_by_vpid(pid) find_task_by_pid(pid)
++
+ /*
+ * cloning flags:
+ */
+ #define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+ #define CLONE_VM 0x00000100 /* set if VM shared between processes */
+@@ -84,10 +95,11 @@ struct sched_param {
+ #include <linux/param.h>
+ #include <linux/resource.h>
+ #include <linux/timer.h>
+ #include <linux/hrtimer.h>
+ #include <linux/task_io_accounting.h>
++#include <linux/kobject.h>
+
+ #include <asm/processor.h>
+
+ struct exec_domain;
+ struct futex_pi_state;
+@@ -133,10 +145,11 @@ extern unsigned long nr_active(void);
+ extern unsigned long nr_iowait(void);
+ extern unsigned long weighted_cpuload(const int cpu);
+
+ struct seq_file;
+ struct cfs_rq;
++struct task_group;
+ #ifdef CONFIG_SCHED_DEBUG
+ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+ extern void proc_sched_set_task(struct task_struct *p);
+ extern void
+ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+@@ -171,12 +184,11 @@ print_cfs_rq(struct seq_file *m, int cpu
+ #define TASK_TRACED 8
+ /* in tsk->exit_state */
+ #define EXIT_ZOMBIE 16
+ #define EXIT_DEAD 32
+ /* in tsk->state again */
+-#define TASK_NONINTERACTIVE 64
+-#define TASK_DEAD 128
++#define TASK_DEAD 64
+
+ #define __set_task_state(tsk, state_value) \
+ do { (tsk)->state = (state_value); } while (0)
+ #define set_task_state(tsk, state_value) \
+ set_mb((tsk)->state, (state_value))
+@@ -276,10 +288,14 @@ static inline void touch_all_softlockup_
+ #endif
+
+
+ /* Attach to any functions which should be ignored in wchan output. */
+ #define __sched __attribute__((__section__(".sched.text")))
++
++/* Linker adds these: start and end of __sched functions */
++extern char __sched_text_start[], __sched_text_end[];
++
+ /* Is this address in the __sched functions? */
+ extern int in_sched_functions(unsigned long addr);
+
+ #define MAX_SCHEDULE_TIMEOUT LONG_MAX
+ extern signed long FASTCALL(schedule_timeout(signed long timeout));
+@@ -513,10 +529,12 @@ struct signal_struct {
+ * and for reaped dead child processes forked by this group.
+ * Live threads maintain their own counters and add to these
+ * in __exit_signal, except for the group leader.
+ */
+ cputime_t utime, stime, cutime, cstime;
++ cputime_t gtime;
++ cputime_t cgtime;
+ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
+
+ /*
+@@ -593,12 +611,27 @@ struct user_struct {
+ #endif
+
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ uid_t uid;
++
++#ifdef CONFIG_FAIR_USER_SCHED
++ struct task_group *tg;
++#ifdef CONFIG_SYSFS
++ struct kset kset;
++ struct subsys_attribute user_attr;
++ struct work_struct work;
++#endif
++#endif
+ };
+
++#ifdef CONFIG_FAIR_USER_SCHED
++extern int uids_kobject_init(void);
++#else
++static inline int uids_kobject_init(void) { return 0; }
++#endif
++
+ extern struct user_struct *find_user(uid_t);
+
+ extern struct user_struct root_user;
+ #define INIT_USER (&root_user)
+
+@@ -606,17 +639,21 @@ struct backing_dev_info;
+ struct reclaim_state;
+
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ struct sched_info {
+ /* cumulative counters */
+- unsigned long pcnt; /* # of times run on this cpu */
++ unsigned long pcount; /* # of times run on this cpu */
+ unsigned long long cpu_time, /* time spent on the cpu */
+ run_delay; /* time spent waiting on a runqueue */
+
+ /* timestamps */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
++#ifdef CONFIG_SCHEDSTATS
++ /* BKL stats */
++ unsigned int bkl_count;
++#endif
+ };
+ #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+
+ #ifdef CONFIG_SCHEDSTATS
+ extern const struct file_operations proc_schedstat_operations;
+@@ -747,43 +784,42 @@ struct sched_domain {
+ unsigned int balance_interval; /* initialise to 1. units in ms. */
+ unsigned int nr_balance_failed; /* initialise to 0 */
+
+ #ifdef CONFIG_SCHEDSTATS
+ /* load_balance() stats */
+- unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_count[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
+
+ /* Active load balancing */
+- unsigned long alb_cnt;
+- unsigned long alb_failed;
+- unsigned long alb_pushed;
++ unsigned int alb_count;
++ unsigned int alb_failed;
++ unsigned int alb_pushed;
+
+ /* SD_BALANCE_EXEC stats */
+- unsigned long sbe_cnt;
+- unsigned long sbe_balanced;
+- unsigned long sbe_pushed;
++ unsigned int sbe_count;
++ unsigned int sbe_balanced;
++ unsigned int sbe_pushed;
+
+ /* SD_BALANCE_FORK stats */
+- unsigned long sbf_cnt;
+- unsigned long sbf_balanced;
+- unsigned long sbf_pushed;
++ unsigned int sbf_count;
++ unsigned int sbf_balanced;
++ unsigned int sbf_pushed;
+
+ /* try_to_wake_up() stats */
+- unsigned long ttwu_wake_remote;
+- unsigned long ttwu_move_affine;
+- unsigned long ttwu_move_balance;
++ unsigned int ttwu_wake_remote;
++ unsigned int ttwu_move_affine;
++ unsigned int ttwu_move_balance;
+ #endif
+ };
+
+-extern int partition_sched_domains(cpumask_t *partition1,
+- cpumask_t *partition2);
++extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+
+ #endif /* CONFIG_SMP */
+
+ /*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+@@ -851,27 +887,32 @@ struct uts_namespace;
+
+ struct rq;
+ struct sched_domain;
+
+ struct sched_class {
+- struct sched_class *next;
++ const struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+- void (*yield_task) (struct rq *rq, struct task_struct *p);
++ void (*yield_task) (struct rq *rq);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+
++#ifdef CONFIG_SMP
+ unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
+- struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
++ struct rq *busiest, unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio);
+
++ int (*move_one_task) (struct rq *this_rq, int this_cpu,
++ struct rq *busiest, struct sched_domain *sd,
++ enum cpu_idle_type idle);
++#endif
++
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
+ };
+
+@@ -885,46 +926,52 @@ struct load_weight {
+ * Current field usage histogram:
+ *
+ * 4 se->block_start
+ * 4 se->run_node
+ * 4 se->sleep_start
+- * 4 se->sleep_start_fair
+ * 6 se->load.weight
+- * 7 se->delta_fair
+- * 15 se->wait_runtime
+ */
+ struct sched_entity {
+- long wait_runtime;
+- unsigned long delta_fair_run;
+- unsigned long delta_fair_sleep;
+- unsigned long delta_exec;
+- s64 fair_key;
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
++ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+- u64 wait_start_fair;
+- u64 sleep_start_fair;
+
+ #ifdef CONFIG_SCHEDSTATS
+ u64 wait_start;
+ u64 wait_max;
+- s64 sum_wait_runtime;
+
+ u64 sleep_start;
+ u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
+ u64 block_max;
+ u64 exec_max;
++ u64 slice_max;
+
+- unsigned long wait_runtime_overruns;
+- unsigned long wait_runtime_underruns;
++ u64 nr_migrations;
++ u64 nr_migrations_cold;
++ u64 nr_failed_migrations_affine;
++ u64 nr_failed_migrations_running;
++ u64 nr_failed_migrations_hot;
++ u64 nr_forced_migrations;
++ u64 nr_forced2_migrations;
++
++ u64 nr_wakeups;
++ u64 nr_wakeups_sync;
++ u64 nr_wakeups_migrate;
++ u64 nr_wakeups_local;
++ u64 nr_wakeups_remote;
++ u64 nr_wakeups_affine;
++ u64 nr_wakeups_affine_attempts;
++ u64 nr_wakeups_passive;
++ u64 nr_wakeups_idle;
+ #endif
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+@@ -949,11 +996,11 @@ struct task_struct {
+ #endif
+ #endif
+
+ int prio, static_prio, normal_prio;
+ struct list_head run_list;
+- struct sched_class *sched_class;
++ const struct sched_class *sched_class;
+ struct sched_entity se;
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+@@ -1019,11 +1066,12 @@ struct task_struct {
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ unsigned int rt_priority;
+- cputime_t utime, stime;
++ cputime_t utime, stime, utimescaled, stimescaled;
++ cputime_t gtime;
+ cputime_t prev_utime, prev_stime;
+ unsigned long nvcsw, nivcsw; /* context switch counts */
+ struct timespec start_time; /* monotonic time */
+ struct timespec real_start_time; /* boot based time */
+ /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+@@ -1312,10 +1360,11 @@ static inline void put_task_struct(struc
+ #define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+ #define PF_STARTING 0x00000002 /* being created */
+ #define PF_EXITING 0x00000004 /* getting shut down */
+ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
++#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+ #define PF_DUMPCORE 0x00000200 /* dumped core */
+ #define PF_SIGNALED 0x00000400 /* killed by a signal */
+ #define PF_MEMALLOC 0x00000800 /* Allocating memory */
+@@ -1399,19 +1448,30 @@ extern void idle_task_exit(void);
+ static inline void idle_task_exit(void) {}
+ #endif
+
+ extern void sched_idle_next(void);
+
++#ifdef CONFIG_SCHED_DEBUG
+ extern unsigned int sysctl_sched_latency;
+ extern unsigned int sysctl_sched_min_granularity;
+ extern unsigned int sysctl_sched_wakeup_granularity;
+ extern unsigned int sysctl_sched_batch_wakeup_granularity;
+-extern unsigned int sysctl_sched_stat_granularity;
+-extern unsigned int sysctl_sched_runtime_limit;
+-extern unsigned int sysctl_sched_compat_yield;
+ extern unsigned int sysctl_sched_child_runs_first;
+ extern unsigned int sysctl_sched_features;
++extern unsigned int sysctl_sched_migration_cost;
++extern unsigned int sysctl_sched_nr_migrate;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++extern unsigned int sysctl_sched_min_bal_int_shares;
++extern unsigned int sysctl_sched_max_bal_int_shares;
++#endif
++
++int sched_nr_latency_handler(struct ctl_table *table, int write,
++ struct file *file, void __user *buffer, size_t *length,
++ loff_t *ppos);
++#endif
++
++extern unsigned int sysctl_sched_compat_yield;
+
+ #ifdef CONFIG_RT_MUTEXES
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+@@ -1841,10 +1901,22 @@ extern long sched_getaffinity(pid_t pid,
+
+ extern int sched_mc_power_savings, sched_smt_power_savings;
+
+ extern void normalize_rt_tasks(void);
+
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++extern struct task_group init_task_group;
++
++extern struct task_group *sched_create_group(void);
++extern void sched_destroy_group(struct task_group *tg);
++extern void sched_move_task(struct task_struct *tsk);
++extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
++extern unsigned long sched_group_shares(struct task_group *tg);
++
++#endif
++
+ #ifdef CONFIG_TASK_XACCT
+ static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+ {
+ tsk->rchar += amt;
+ }
+@@ -1879,8 +1951,16 @@ static inline void inc_syscr(struct task
+ static inline void inc_syscw(struct task_struct *tsk)
+ {
+ }
+ #endif
+
++#ifdef CONFIG_SMP
++void migration_init(void);
++#else
++static inline void migration_init(void)
++{
++}
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif
+--- linux-2.6.23.orig/include/linux/taskstats.h
++++ linux-2.6.23/include/linux/taskstats.h
+@@ -29,11 +29,11 @@
+ * b) add comment indicating new version number at end of struct
+ * c) add new fields after version comment; maintain 64-bit alignment
+ */
+
+
+-#define TASKSTATS_VERSION 5
++#define TASKSTATS_VERSION 6
+ #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
+ * in linux/sched.h */
+
+ struct taskstats {
+
+@@ -150,10 +150,15 @@ struct taskstats {
+ __u64 write_bytes; /* bytes of write I/O */
+ __u64 cancelled_write_bytes; /* bytes of cancelled write I/O */
+
+ __u64 nvcsw; /* voluntary_ctxt_switches */
+ __u64 nivcsw; /* nonvoluntary_ctxt_switches */
++
++ /* time accounting for SMT machines */
++ __u64 ac_utimescaled; /* utime scaled on frequency etc */
++ __u64 ac_stimescaled; /* stime scaled on frequency etc */
++ __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
+ };
+
+
+ /*
+ * Commands sent from userspace
+--- linux-2.6.23.orig/include/linux/topology.h
++++ linux-2.6.23/include/linux/topology.h
+@@ -157,19 +157,18 @@
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+- .idle_idx = 0, \
+- .newidle_idx = 0, \
++ .idle_idx = 1, \
++ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_NEWIDLE \
+ | SD_BALANCE_EXEC \
+ | SD_WAKE_AFFINE \
+- | SD_WAKE_IDLE \
+ | BALANCE_FOR_PKG_POWER,\
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+ }
+--- linux-2.6.23.orig/init/Kconfig
++++ linux-2.6.23/init/Kconfig
+@@ -271,18 +271,44 @@ config LOG_BUF_SHIFT
+ 12 => 4 KB
+
+ config CPUSETS
+ bool "Cpuset support"
+ depends on SMP
++ #
++ # disabled for now - depends on control groups, which
++ # are hard to backport:
++ #
++ depends on 0
+ help
+ This option will let you create and manage CPUSETs which
+ allow dynamically partitioning a system into sets of CPUs and
+ Memory Nodes and assigning tasks to run only within those sets.
+ This is primarily useful on large SMP or NUMA systems.
+
+ Say N if unsure.
+
++config FAIR_GROUP_SCHED
++ bool "Fair group CPU scheduler"
++ default y
++ depends on EXPERIMENTAL
++ help
++	  This feature lets the CPU scheduler recognize task groups and control CPU
++ bandwidth allocation to such task groups.
++
++choice
++ depends on FAIR_GROUP_SCHED
++ prompt "Basis for grouping tasks"
++ default FAIR_USER_SCHED
++
++config FAIR_USER_SCHED
++ bool "user id"
++ help
++ This option will choose userid as the basis for grouping
++ tasks, thus providing equal CPU bandwidth to each user.
++
++endchoice
++
+ config SYSFS_DEPRECATED
+ bool "Create deprecated sysfs files"
+ default y
+ help
+ This option creates deprecated symlinks such as the
+--- linux-2.6.23.orig/init/main.c
++++ linux-2.6.23/init/main.c
+@@ -750,15 +750,12 @@ static int __init nosoftlockup_setup(cha
+ __setup("nosoftlockup", nosoftlockup_setup);
+
+ static void __init do_pre_smp_initcalls(void)
+ {
+ extern int spawn_ksoftirqd(void);
+-#ifdef CONFIG_SMP
+- extern int migration_init(void);
+
+ migration_init();
+-#endif
+ spawn_ksoftirqd();
+ if (!nosoftlockup)
+ spawn_softlockup_task();
+ }
+
+--- linux-2.6.23.orig/kernel/delayacct.c
++++ linux-2.6.23/kernel/delayacct.c
+@@ -113,15 +113,21 @@ int __delayacct_add_tsk(struct taskstats
+ tmp = (s64)d->cpu_run_real_total;
+ cputime_to_timespec(tsk->utime + tsk->stime, &ts);
+ tmp += timespec_to_ns(&ts);
+ d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
+
++ tmp = (s64)d->cpu_scaled_run_real_total;
++ cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
++ tmp += timespec_to_ns(&ts);
++ d->cpu_scaled_run_real_total =
++ (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
++
+ /*
+ * No locking available for sched_info (and too expensive to add one)
+ * Mitigate by taking snapshot of values
+ */
+- t1 = tsk->sched_info.pcnt;
++ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+ t3 = tsk->sched_info.cpu_time;
+
+ d->cpu_count += t1;
+
+--- linux-2.6.23.orig/kernel/exit.c
++++ linux-2.6.23/kernel/exit.c
+@@ -109,10 +109,11 @@ static void __exit_signal(struct task_st
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
++ sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->inblock += task_io_get_inblock(tsk);
+@@ -1240,10 +1241,15 @@ static int wait_task_zombie(struct task_
+ psig->cstime =
+ cputime_add(psig->cstime,
+ cputime_add(p->stime,
+ cputime_add(sig->stime,
+ sig->cstime)));
++ psig->cgtime =
++ cputime_add(psig->cgtime,
++ cputime_add(p->gtime,
++ cputime_add(sig->gtime,
++ sig->cgtime)));
+ psig->cmin_flt +=
+ p->min_flt + sig->min_flt + sig->cmin_flt;
+ psig->cmaj_flt +=
+ p->maj_flt + sig->maj_flt + sig->cmaj_flt;
+ psig->cnvcsw +=
+--- linux-2.6.23.orig/kernel/fork.c
++++ linux-2.6.23/kernel/fork.c
+@@ -875,10 +875,12 @@ static inline int copy_signal(unsigned l
+
+ sig->leader = 0; /* session leadership doesn't inherit */
+ sig->tty_old_pgrp = NULL;
+
+ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
++ sig->gtime = cputime_zero;
++ sig->cgtime = cputime_zero;
+ sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
+ sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+ sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+ sig->sum_sched_runtime = 0;
+ INIT_LIST_HEAD(&sig->cpu_timers[0]);
+@@ -1045,10 +1047,13 @@ static struct task_struct *copy_process(
+
+ p->utime = cputime_zero;
+ p->stime = cputime_zero;
+ p->prev_utime = cputime_zero;
+ p->prev_stime = cputime_zero;
++ p->gtime = cputime_zero;
++ p->utimescaled = cputime_zero;
++ p->stimescaled = cputime_zero;
+
+ #ifdef CONFIG_TASK_XACCT
+ p->rchar = 0; /* I/O counter: bytes read */
+ p->wchar = 0; /* I/O counter: bytes written */
+ p->syscr = 0; /* I/O counter: read syscalls */
+--- linux-2.6.23.orig/kernel/ksysfs.c
++++ linux-2.6.23/kernel/ksysfs.c
+@@ -12,10 +12,11 @@
+ #include <linux/string.h>
+ #include <linux/sysfs.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/kexec.h>
++#include <linux/sched.h>
+
+ #define KERNEL_ATTR_RO(_name) \
+ static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
+
+ #define KERNEL_ATTR_RW(_name) \
+@@ -114,9 +115,16 @@ static int __init ksysfs_init(void)
+ notes_attr.size = notes_size;
+ error = sysfs_create_bin_file(&kernel_subsys.kobj,
+ &notes_attr);
+ }
+
++ /*
++ * Create "/sys/kernel/uids" directory and corresponding root user's
++ * directory under it.
++ */
++ if (!error)
++ error = uids_kobject_init();
++
+ return error;
+ }
+
+ core_initcall(ksysfs_init);
+--- linux-2.6.23.orig/kernel/sched.c
++++ linux-2.6.23/kernel/sched.c
+@@ -42,10 +42,11 @@
+ #include <linux/profile.h>
+ #include <linux/freezer.h>
+ #include <linux/vmalloc.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
++#include <linux/pid_namespace.h>
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+ #include <linux/timer.h>
+ #include <linux/rcupdate.h>
+ #include <linux/cpu.h>
+@@ -59,21 +60,23 @@
+ #include <linux/tsacct_kern.h>
+ #include <linux/kprobes.h>
+ #include <linux/delayacct.h>
+ #include <linux/reciprocal_div.h>
+ #include <linux/unistd.h>
++#include <linux/pagemap.h>
+
+ #include <asm/tlb.h>
++#include <asm/irq_regs.h>
+
+ /*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+ unsigned long long __attribute__((weak)) sched_clock(void)
+ {
+- return (unsigned long long)jiffies * (1000000000 / HZ);
++ return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+ }
+
+ /*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+@@ -93,24 +96,22 @@ unsigned long long __attribute__((weak))
+ #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+ /*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+-#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
+-#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
++#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
++#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ))
+
+ #define NICE_0_LOAD SCHED_LOAD_SCALE
+ #define NICE_0_SHIFT SCHED_LOAD_SHIFT
+
+ /*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
++ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+-#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
+ #define DEF_TIMESLICE (100 * HZ / 1000)
+
+ #ifdef CONFIG_SMP
+ /*
+ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
+@@ -130,28 +131,10 @@ static inline void sg_inc_cpu_power(stru
+ sg->__cpu_power += val;
+ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
+ }
+ #endif
+
+-#define SCALE_PRIO(x, prio) \
+- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
+-
+-/*
+- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+- * to time slice values: [800ms ... 100ms ... 5ms]
+- */
+-static unsigned int static_prio_timeslice(int static_prio)
+-{
+- if (static_prio == NICE_TO_PRIO(19))
+- return 1;
+-
+- if (static_prio < NICE_TO_PRIO(0))
+- return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
+- else
+- return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+-}
+-
+ static inline int rt_policy(int policy)
+ {
+ if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+ return 1;
+ return 0;
+@@ -168,45 +151,115 @@ static inline int task_has_rt_policy(str
+ struct rt_prio_array {
+ DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+ struct list_head queue[MAX_RT_PRIO];
+ };
+
+-struct load_stat {
+- struct load_weight load;
+- u64 load_update_start, load_update_last;
+- unsigned long delta_fair, delta_exec, delta_stat;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++#include <linux/cgroup.h>
++
++struct cfs_rq;
++
++/* task group related information */
++struct task_group {
++#ifdef CONFIG_FAIR_CGROUP_SCHED
++ struct cgroup_subsys_state css;
++#endif
++ /* schedulable entities of this group on each cpu */
++ struct sched_entity **se;
++ /* runqueue "owned" by this group on each cpu */
++ struct cfs_rq **cfs_rq;
++ unsigned long shares;
++ /* spinlock to serialize modification to shares */
++ spinlock_t lock;
++ struct rcu_head rcu;
++};
++
++/* Default task group's sched entity on each cpu */
++static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
++/* Default task group's cfs_rq on each cpu */
++static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
++
++static struct sched_entity *init_sched_entity_p[NR_CPUS];
++static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
++
++/* Default task group.
++ * Every task in system belong to this group at bootup.
++ */
++struct task_group init_task_group = {
++ .se = init_sched_entity_p,
++ .cfs_rq = init_cfs_rq_p,
+ };
+
++#ifdef CONFIG_FAIR_USER_SCHED
++# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
++#else
++# define INIT_TASK_GRP_LOAD NICE_0_LOAD
++#endif
++
++static int init_task_group_load = INIT_TASK_GRP_LOAD;
++
++/* return group to which a task belongs */
++static inline struct task_group *task_group(struct task_struct *p)
++{
++ struct task_group *tg;
++
++#ifdef CONFIG_FAIR_USER_SCHED
++ tg = p->user->tg;
++#elif defined(CONFIG_FAIR_CGROUP_SCHED)
++ tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
++ struct task_group, css);
++#else
++ tg = &init_task_group;
++#endif
++ return tg;
++}
++
++/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
++static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
++{
++ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
++ p->se.parent = task_group(p)->se[cpu];
++}
++
++#else
++
++static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
+ /* CFS-related fields in a runqueue */
+ struct cfs_rq {
+ struct load_weight load;
+ unsigned long nr_running;
+
+- s64 fair_clock;
+ u64 exec_clock;
+- s64 wait_runtime;
+- u64 sleeper_bonus;
+- unsigned long wait_runtime_overruns, wait_runtime_underruns;
++ u64 min_vruntime;
+
+ struct rb_root tasks_timeline;
+ struct rb_node *rb_leftmost;
+ struct rb_node *rb_load_balance_curr;
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* 'curr' points to currently running entity on this cfs_rq.
+ * It is set to NULL otherwise (i.e when none are currently running).
+ */
+ struct sched_entity *curr;
++
++ unsigned long nr_spread_over;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
+
+- /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
++ /*
++ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+ * (like users, containers etc.)
+ *
+ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+ * list is used during load balance.
+ */
+- struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
++ struct list_head leaf_cfs_rq_list;
++ struct task_group *tg; /* group that "owns" this runqueue */
+ #endif
+ };
+
+ /* Real-Time classes' related field in a runqueue: */
+ struct rt_rq {
+@@ -221,11 +274,12 @@ struct rt_rq {
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+ struct rq {
+- spinlock_t lock; /* runqueue lock */
++ /* runqueue lock: */
++ spinlock_t lock;
+
+ /*
+ * nr_running and cpu_load should be in the same cacheline because
+ * remote CPUs use both these fields when doing load calculation.
+ */
+@@ -234,19 +288,21 @@ struct rq {
+ unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+ unsigned char idle_at_tick;
+ #ifdef CONFIG_NO_HZ
+ unsigned char in_nohz_recently;
+ #endif
+- struct load_stat ls; /* capture load from *all* tasks on this cpu */
++ /* capture load from *all* tasks on this cpu: */
++ struct load_weight load;
+ unsigned long nr_load_updates;
+ u64 nr_switches;
+
+ struct cfs_rq cfs;
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+- struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
++ /* list of leaf cfs_rq on this cpu: */
++ struct list_head leaf_cfs_rq_list;
+ #endif
+- struct rt_rq rt;
++ struct rt_rq rt;
+
+ /*
+ * This is part of a global counter where only the total sum
+ * over all CPUs matters. A task can increase this counter on
+ * one CPU and if it got migrated afterwards it may decrease
+@@ -272,34 +328,38 @@ struct rq {
+ struct sched_domain *sd;
+
+ /* For active balancing */
+ int active_balance;
+ int push_cpu;
+- int cpu; /* cpu of this runqueue */
++ /* cpu of this runqueue: */
++ int cpu;
+
+ struct task_struct *migration_thread;
+ struct list_head migration_queue;
+ #endif
+
+ #ifdef CONFIG_SCHEDSTATS
+ /* latency stats */
+ struct sched_info rq_sched_info;
+
+ /* sys_sched_yield() stats */
+- unsigned long yld_exp_empty;
+- unsigned long yld_act_empty;
+- unsigned long yld_both_empty;
+- unsigned long yld_cnt;
++ unsigned int yld_exp_empty;
++ unsigned int yld_act_empty;
++ unsigned int yld_both_empty;
++ unsigned int yld_count;
+
+ /* schedule() stats */
+- unsigned long sched_switch;
+- unsigned long sched_cnt;
+- unsigned long sched_goidle;
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+- unsigned long ttwu_cnt;
+- unsigned long ttwu_local;
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++
++ /* BKL stats */
++ unsigned int bkl_count;
+ #endif
+ struct lock_class_key rq_lock_key;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+@@ -380,10 +440,45 @@ static void update_rq_clock(struct rq *r
+ #define this_rq() (&__get_cpu_var(runqueues))
+ #define task_rq(p) cpu_rq(task_cpu(p))
+ #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+
+ /*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug static const
++#endif
++
++/*
++ * Debugging: various feature bits
++ */
++enum {
++ SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
++ SCHED_FEAT_WAKEUP_PREEMPT = 2,
++ SCHED_FEAT_START_DEBIT = 4,
++ SCHED_FEAT_TREE_AVG = 8,
++ SCHED_FEAT_APPROX_AVG = 16,
++};
++
++const_debug unsigned int sysctl_sched_features =
++ SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
++ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
++ SCHED_FEAT_START_DEBIT * 1 |
++ SCHED_FEAT_TREE_AVG * 0 |
++ SCHED_FEAT_APPROX_AVG * 0;
++
++#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
++
++/*
++ * Number of tasks to iterate in a single balance run.
++ * Limited because this is done with IRQs disabled.
++ */
++const_debug unsigned int sysctl_sched_nr_migrate = 32;
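
The feature bits above let debug kernels flip individual scheduler behaviours at runtime through sysctl_sched_features, while non-debug builds see an ordinary constant the compiler can fold away. A minimal standalone sketch of the same bitmask-plus-macro pattern (the names here are made up, not the kernel's):

#include <stdio.h>

enum {
        FEAT_NEW_FAIR_SLEEPERS = 1,
        FEAT_WAKEUP_PREEMPT    = 2,
        FEAT_START_DEBIT       = 4,
};

/* Plays the role of sysctl_sched_features: a bitmask of enabled features. */
static unsigned int features = FEAT_NEW_FAIR_SLEEPERS * 1 |
                               FEAT_WAKEUP_PREEMPT    * 1 |
                               FEAT_START_DEBIT       * 0;

#define feat(x) (features & FEAT_##x)

int main(void)
{
        if (feat(WAKEUP_PREEMPT))
                printf("wakeup preemption enabled\n");
        if (!feat(START_DEBIT))
                printf("start debit disabled\n");
        return 0;
}
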
++
++/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+ unsigned long long cpu_clock(int cpu)
+ {
+@@ -391,40 +486,39 @@ unsigned long long cpu_clock(int cpu)
+ unsigned long flags;
+ struct rq *rq;
+
+ local_irq_save(flags);
+ rq = cpu_rq(cpu);
+- update_rq_clock(rq);
++ /*
++ * Only call sched_clock() if the scheduler has already been
++ * initialized (some code might call cpu_clock() very early):
++ */
++ if (rq->idle)
++ update_rq_clock(rq);
+ now = rq->clock;
+ local_irq_restore(flags);
+
+ return now;
+ }
+-
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+-/* Change a task's ->cfs_rq if it moves across CPUs */
+-static inline void set_task_cfs_rq(struct task_struct *p)
+-{
+- p->se.cfs_rq = &task_rq(p)->cfs;
+-}
+-#else
+-static inline void set_task_cfs_rq(struct task_struct *p)
+-{
+-}
+-#endif
++EXPORT_SYMBOL_GPL(cpu_clock);
+
+ #ifndef prepare_arch_switch
+ # define prepare_arch_switch(next) do { } while (0)
+ #endif
+ #ifndef finish_arch_switch
+ # define finish_arch_switch(prev) do { } while (0)
+ #endif
+
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
+ #ifndef __ARCH_WANT_UNLOCKED_CTXSW
+ static inline int task_running(struct rq *rq, struct task_struct *p)
+ {
+- return rq->curr == p;
++ return task_current(rq, p);
+ }
+
+ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+ {
+ }
+@@ -449,11 +543,11 @@ static inline void finish_lock_switch(st
+ static inline int task_running(struct rq *rq, struct task_struct *p)
+ {
+ #ifdef CONFIG_SMP
+ return p->oncpu;
+ #else
+- return rq->curr == p;
++ return task_current(rq, p);
+ #endif
+ }
+
+ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+ {
+@@ -494,44 +588,40 @@ static inline void finish_lock_switch(st
+ * Must be called interrupts disabled.
+ */
+ static inline struct rq *__task_rq_lock(struct task_struct *p)
+ __acquires(rq->lock)
+ {
+- struct rq *rq;
+-
+-repeat_lock_task:
+- rq = task_rq(p);
+- spin_lock(&rq->lock);
+- if (unlikely(rq != task_rq(p))) {
++ for (;;) {
++ struct rq *rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
+ spin_unlock(&rq->lock);
+- goto repeat_lock_task;
+ }
+- return rq;
+ }
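
The loop in __task_rq_lock() has to re-read task_rq(p) after taking the lock because the task may migrate between the lookup and the acquisition; only when the runqueue is still the task's does the lock actually cover it. A hedged userspace analogue of this lock-then-revalidate pattern, assuming C11 atomics and pthreads rather than the scheduler's own types:

#include <pthread.h>
#include <stdatomic.h>

struct container {
        pthread_mutex_t lock;
};

struct item {
        _Atomic(struct container *) home;   /* may be changed concurrently */
};

/* Lock the container an item currently lives in; retry if it moved meanwhile. */
static struct container *lock_item_home(struct item *it)
{
        for (;;) {
                struct container *c = atomic_load(&it->home);

                pthread_mutex_lock(&c->lock);
                if (c == atomic_load(&it->home))
                        return c;                   /* still valid, lock held */
                pthread_mutex_unlock(&c->lock);     /* raced with a move, retry */
        }
}

int main(void)
{
        struct container c = { PTHREAD_MUTEX_INITIALIZER };
        struct item it;

        atomic_init(&it.home, &c);
        pthread_mutex_unlock(&lock_item_home(&it)->lock);
        return 0;
}
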
+
+ /*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+- * interrupts. Note the ordering: we can safely lookup the task_rq without
++ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(rq->lock)
+ {
+ struct rq *rq;
+
+-repeat_lock_task:
+- local_irq_save(*flags);
+- rq = task_rq(p);
+- spin_lock(&rq->lock);
+- if (unlikely(rq != task_rq(p))) {
++ for (;;) {
++ local_irq_save(*flags);
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
+ spin_unlock_irqrestore(&rq->lock, *flags);
+- goto repeat_lock_task;
+ }
+- return rq;
+ }
+
+-static inline void __task_rq_unlock(struct rq *rq)
++static void __task_rq_unlock(struct rq *rq)
+ __releases(rq->lock)
+ {
+ spin_unlock(&rq->lock);
+ }
+
+@@ -542,11 +632,11 @@ static inline void task_rq_unlock(struct
+ }
+
+ /*
+ * this_rq_lock - lock this runqueue and disable interrupts.
+ */
+-static inline struct rq *this_rq_lock(void)
++static struct rq *this_rq_lock(void)
+ __acquires(rq->lock)
+ {
+ struct rq *rq;
+
+ local_irq_disable();
+@@ -576,10 +666,11 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep
+ void sched_clock_idle_wakeup_event(u64 delta_ns)
+ {
+ struct rq *rq = cpu_rq(smp_processor_id());
+ u64 now = sched_clock();
+
++ touch_softlockup_watchdog();
+ rq->idle_clock += delta_ns;
+ /*
+ * Override the previous timestamp and ignore all
+ * sched_clock() deltas that occured while we idled,
+ * and use the PM-provided delta_ns to advance the
+@@ -642,23 +733,10 @@ static inline void resched_task(struct t
+ assert_spin_locked(&task_rq(p)->lock);
+ set_tsk_need_resched(p);
+ }
+ #endif
+
+-static u64 div64_likely32(u64 divident, unsigned long divisor)
+-{
+-#if BITS_PER_LONG == 32
+- if (likely(divident <= 0xffffffffULL))
+- return (u32)divident / divisor;
+- do_div(divident, divisor);
+-
+- return divident;
+-#else
+- return divident / divisor;
+-#endif
+-}
+-
+ #if BITS_PER_LONG == 32
+ # define WMULT_CONST (~0UL)
+ #else
+ # define WMULT_CONST (1UL << 32)
+ #endif
+@@ -696,27 +774,25 @@ static inline unsigned long
+ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+ {
+ return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+ }
+
+-static void update_load_add(struct load_weight *lw, unsigned long inc)
++static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+ {
+ lw->weight += inc;
+- lw->inv_weight = 0;
+ }
+
+-static void update_load_sub(struct load_weight *lw, unsigned long dec)
++static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+ {
+ lw->weight -= dec;
+- lw->inv_weight = 0;
+ }
+
+ /*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
++ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
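
To make the weighting concrete: with NICE_0_LOAD at 1024 and per-nice-level weights shrinking by roughly 25% a step, two runnable tasks split the CPU in proportion to their weights. The numbers below are approximate values from the usual prio_to_weight table and are only illustrative:

#include <stdio.h>

int main(void)
{
        /* Approximate CFS load weights: nice 0 (NICE_0_LOAD) and nice 5. */
        unsigned long w_nice0 = 1024;
        unsigned long w_nice5 = 335;
        unsigned long total = w_nice0 + w_nice5;

        /* Each task's CPU share follows its share of the total weight. */
        printf("nice 0 share: %.1f%%\n", 100.0 * w_nice0 / total);   /* ~75.4% */
        printf("nice 5 share: %.1f%%\n", 100.0 * w_nice5 / total);   /* ~24.6% */
        return 0;
}
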
+
+ #define WEIGHT_IDLEPRIO 2
+@@ -774,76 +850,62 @@ struct rq_iterator {
+ void *arg;
+ struct task_struct *(*start)(void *);
+ struct task_struct *(*next)(void *);
+ };
+
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator);
++#ifdef CONFIG_SMP
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator);
++
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator);
++#endif
++
++#ifdef CONFIG_CGROUP_CPUACCT
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
++#else
++static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
++#endif
+
+ #include "sched_stats.h"
+-#include "sched_rt.c"
+-#include "sched_fair.c"
+ #include "sched_idletask.c"
++#include "sched_fair.c"
++#include "sched_rt.c"
+ #ifdef CONFIG_SCHED_DEBUG
+ # include "sched_debug.c"
+ #endif
+
+ #define sched_class_highest (&rt_sched_class)
+
+-static void __update_curr_load(struct rq *rq, struct load_stat *ls)
+-{
+- if (rq->curr != rq->idle && ls->load.weight) {
+- ls->delta_exec += ls->delta_stat;
+- ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
+- ls->delta_stat = 0;
+- }
+-}
+-
+ /*
+ * Update delta_exec, delta_fair fields for rq.
+ *
+ * delta_fair clock advances at a rate inversely proportional to
+- * total load (rq->ls.load.weight) on the runqueue, while
++ * total load (rq->load.weight) on the runqueue, while
+ * delta_exec advances at the same rate as wall-clock (provided
+ * cpu is not idle).
+ *
+ * delta_exec / delta_fair is a measure of the (smoothened) load on this
+ * runqueue over any given interval. This (smoothened) load is used
+ * during load balance.
+ *
+- * This function is called /before/ updating rq->ls.load
++ * This function is called /before/ updating rq->load
+ * and when switching tasks.
+ */
+-static void update_curr_load(struct rq *rq)
+-{
+- struct load_stat *ls = &rq->ls;
+- u64 start;
+-
+- start = ls->load_update_start;
+- ls->load_update_start = rq->clock;
+- ls->delta_stat += rq->clock - start;
+- /*
+- * Stagger updates to ls->delta_fair. Very frequent updates
+- * can be expensive.
+- */
+- if (ls->delta_stat >= sysctl_sched_stat_granularity)
+- __update_curr_load(rq, ls);
+-}
+-
+ static inline void inc_load(struct rq *rq, const struct task_struct *p)
+ {
+- update_curr_load(rq);
+- update_load_add(&rq->ls.load, p->se.load.weight);
++ update_load_add(&rq->load, p->se.load.weight);
+ }
+
+ static inline void dec_load(struct rq *rq, const struct task_struct *p)
+ {
+- update_curr_load(rq);
+- update_load_sub(&rq->ls.load, p->se.load.weight);
++ update_load_sub(&rq->load, p->se.load.weight);
+ }
+
+ static void inc_nr_running(struct task_struct *p, struct rq *rq)
+ {
+ rq->nr_running++;
+@@ -856,12 +918,10 @@ static void dec_nr_running(struct task_s
+ dec_load(rq, p);
+ }
+
+ static void set_load_weight(struct task_struct *p)
+ {
+- p->se.wait_runtime = 0;
+-
+ if (task_has_rt_policy(p)) {
+ p->se.load.weight = prio_to_weight[0] * 2;
+ p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+ return;
+ }
+@@ -949,24 +1009,10 @@ static void activate_task(struct rq *rq,
+ enqueue_task(rq, p, wakeup);
+ inc_nr_running(p, rq);
+ }
+
+ /*
+- * activate_idle_task - move idle task to the _front_ of runqueue.
+- */
+-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
+-{
+- update_rq_clock(rq);
+-
+- if (p->state == TASK_UNINTERRUPTIBLE)
+- rq->nr_uninterruptible--;
+-
+- enqueue_task(rq, p, 0);
+- inc_nr_running(p, rq);
+-}
+-
+-/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+ {
+ if (p->state == TASK_UNINTERRUPTIBLE)
+@@ -986,45 +1032,76 @@ inline int task_curr(const struct task_s
+ }
+
+ /* Used instead of source_load when we know the type == 0 */
+ unsigned long weighted_cpuload(const int cpu)
+ {
+- return cpu_rq(cpu)->ls.load.weight;
++ return cpu_rq(cpu)->load.weight;
+ }
+
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
++ set_task_cfs_rq(p, cpu);
+ #ifdef CONFIG_SMP
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfuly executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
+ task_thread_info(p)->cpu = cpu;
+- set_task_cfs_rq(p);
+ #endif
+ }
+
+ #ifdef CONFIG_SMP
+
++/*
++ * Is this task likely cache-hot:
++ */
++static inline int
++task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
++{
++ s64 delta;
++
++ if (p->sched_class != &fair_sched_class)
++ return 0;
++
++ if (sysctl_sched_migration_cost == -1)
++ return 1;
++ if (sysctl_sched_migration_cost == 0)
++ return 0;
++
++ delta = now - p->se.exec_start;
++
++ return delta < (s64)sysctl_sched_migration_cost;
++}
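
task_hot() above calls a fair-class task cache-hot when it last ran within sysctl_sched_migration_cost nanoseconds, with -1 meaning always hot and 0 meaning never hot. A standalone restatement of that comparison; the 0.5 ms default is an assumption for the example, not taken from this patch:

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

/* Assumed default of 0.5 ms; -1 would mean "always hot", 0 "never hot". */
static s64 migration_cost_ns = 500000;

static int is_cache_hot(u64 now_ns, u64 exec_start_ns)
{
        s64 delta;

        if (migration_cost_ns == -1)
                return 1;
        if (migration_cost_ns == 0)
                return 0;

        delta = now_ns - exec_start_ns;
        return delta < migration_cost_ns;
}

int main(void)
{
        u64 now = 10000000ULL;

        printf("%d\n", is_cache_hot(now, now - 200000));    /* ran 200 us ago: 1 */
        printf("%d\n", is_cache_hot(now, now - 2000000));   /* ran 2 ms ago:   0 */
        return 0;
}
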
++
++
+ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ {
+ int old_cpu = task_cpu(p);
+ struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
+- u64 clock_offset, fair_clock_offset;
++ struct cfs_rq *old_cfsrq = task_cfs_rq(p),
++ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
++ u64 clock_offset;
+
+ clock_offset = old_rq->clock - new_rq->clock;
+- fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
+-
+- if (p->se.wait_start_fair)
+- p->se.wait_start_fair -= fair_clock_offset;
+- if (p->se.sleep_start_fair)
+- p->se.sleep_start_fair -= fair_clock_offset;
+
+ #ifdef CONFIG_SCHEDSTATS
+ if (p->se.wait_start)
+ p->se.wait_start -= clock_offset;
+ if (p->se.sleep_start)
+ p->se.sleep_start -= clock_offset;
+ if (p->se.block_start)
+ p->se.block_start -= clock_offset;
++ if (old_cpu != new_cpu) {
++ schedstat_inc(p, se.nr_migrations);
++ if (task_hot(p, old_rq->clock, NULL))
++ schedstat_inc(p, se.nr_forced2_migrations);
++ }
+ #endif
++ p->se.vruntime -= old_cfsrq->min_vruntime -
++ new_cfsrq->min_vruntime;
+
+ __set_task_cpu(p, new_cpu);
+ }
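
The min_vruntime subtraction at the end of set_task_cpu() re-bases a migrating task's vruntime so that its offset from the queue minimum is preserved on the destination CPU. A small standalone check of the arithmetic, with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long long old_min = 1000000;    /* old cfs_rq->min_vruntime */
        unsigned long long new_min = 400000;     /* new cfs_rq->min_vruntime */
        unsigned long long vruntime = 1003000;   /* task sits 3000 ns above old_min */

        /* Same re-basing as the patch: vruntime -= old_min - new_min. */
        vruntime -= old_min - new_min;

        printf("vruntime on new cpu: %llu, offset from min: %llu\n",
               vruntime, vruntime - new_min);    /* 403000, offset still 3000 */
        return 0;
}
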
+
+ struct migration_req {
+@@ -1075,73 +1152,75 @@ void wait_task_inactive(struct task_stru
+ {
+ unsigned long flags;
+ int running, on_rq;
+ struct rq *rq;
+
+-repeat:
+- /*
+- * We do the initial early heuristics without holding
+- * any task-queue locks at all. We'll only try to get
+- * the runqueue lock when things look like they will
+- * work out!
+- */
+- rq = task_rq(p);
++ for (;;) {
++ /*
++ * We do the initial early heuristics without holding
++ * any task-queue locks at all. We'll only try to get
++ * the runqueue lock when things look like they will
++ * work out!
++ */
++ rq = task_rq(p);
+
+- /*
+- * If the task is actively running on another CPU
+- * still, just relax and busy-wait without holding
+- * any locks.
+- *
+- * NOTE! Since we don't hold any locks, it's not
+- * even sure that "rq" stays as the right runqueue!
+- * But we don't care, since "task_running()" will
+- * return false if the runqueue has changed and p
+- * is actually now running somewhere else!
+- */
+- while (task_running(rq, p))
+- cpu_relax();
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since "task_running()" will
++ * return false if the runqueue has changed and p
++ * is actually now running somewhere else!
++ */
++ while (task_running(rq, p))
++ cpu_relax();
+
+- /*
+- * Ok, time to look more closely! We need the rq
+- * lock now, to be *sure*. If we're wrong, we'll
+- * just go back and repeat.
+- */
+- rq = task_rq_lock(p, &flags);
+- running = task_running(rq, p);
+- on_rq = p->se.on_rq;
+- task_rq_unlock(rq, &flags);
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ running = task_running(rq, p);
++ on_rq = p->se.on_rq;
++ task_rq_unlock(rq, &flags);
+
+- /*
+- * Was it really running after all now that we
+- * checked with the proper locks actually held?
+- *
+- * Oops. Go back and try again..
+- */
+- if (unlikely(running)) {
+- cpu_relax();
+- goto repeat;
+- }
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
+
+- /*
+- * It's not enough that it's not actively running,
+- * it must be off the runqueue _entirely_, and not
+- * preempted!
+- *
+- * So if it wa still runnable (but just not actively
+- * running right now), it's preempted, and we should
+- * yield - it could be a while.
+- */
+- if (unlikely(on_rq)) {
+- yield();
+- goto repeat;
+- }
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(on_rq)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
+
+- /*
+- * Ahh, all good. It wasn't running, and it wasn't
+- * runnable, which means that it will never become
+- * running in the future either. We're all done!
+- */
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
+ }
+
+ /***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+@@ -1171,11 +1250,11 @@ void kick_process(struct task_struct *p)
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+-static inline unsigned long source_load(int cpu, int type)
++static unsigned long source_load(int cpu, int type)
+ {
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0)
+@@ -1186,11 +1265,11 @@ static inline unsigned long source_load(
+
+ /*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+-static inline unsigned long target_load(int cpu, int type)
++static unsigned long target_load(int cpu, int type)
+ {
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0)
+@@ -1228,11 +1307,11 @@ find_idlest_group(struct sched_domain *s
+ int local_group;
+ int i;
+
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+- goto nextgroup;
++ continue;
+
+ local_group = cpu_isset(this_cpu, group->cpumask);
+
+ /* Tally up the load of all CPUs in the group */
+ avg_load = 0;
+@@ -1256,13 +1335,11 @@ find_idlest_group(struct sched_domain *s
+ this = group;
+ } else if (avg_load < min_load) {
+ min_load = avg_load;
+ idlest = group;
+ }
+-nextgroup:
+- group = group->next;
+- } while (group != sd->groups);
++ } while (group = group->next, group != sd->groups);
+
+ if (!idlest || 100*this_load < imbalance*min_load)
+ return NULL;
+ return idlest;
+ }
+@@ -1390,12 +1467,17 @@ static int wake_idle(int cpu, struct tas
+
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_IDLE) {
+ cpus_and(tmp, sd->span, p->cpus_allowed);
+ for_each_cpu_mask(i, tmp) {
+- if (idle_cpu(i))
++ if (idle_cpu(i)) {
++ if (i != task_cpu(p)) {
++ schedstat_inc(p,
++ se.nr_wakeups_idle);
++ }
+ return i;
++ }
+ }
+ } else {
+ break;
+ }
+ }
+@@ -1422,11 +1504,11 @@ static inline int wake_idle(int cpu, str
+ *
+ * returns failure only if the task is already active.
+ */
+ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+ {
+- int cpu, this_cpu, success = 0;
++ int cpu, orig_cpu, this_cpu, success = 0;
+ unsigned long flags;
+ long old_state;
+ struct rq *rq;
+ #ifdef CONFIG_SMP
+ struct sched_domain *sd, *this_sd = NULL;
+@@ -1441,19 +1523,20 @@ static int try_to_wake_up(struct task_st
+
+ if (p->se.on_rq)
+ goto out_running;
+
+ cpu = task_cpu(p);
++ orig_cpu = cpu;
+ this_cpu = smp_processor_id();
+
+ #ifdef CONFIG_SMP
+ if (unlikely(task_running(rq, p)))
+ goto out_activate;
+
+ new_cpu = cpu;
+
+- schedstat_inc(rq, ttwu_cnt);
++ schedstat_inc(rq, ttwu_count);
+ if (cpu == this_cpu) {
+ schedstat_inc(rq, ttwu_local);
+ goto out_set_cpu;
+ }
+
+@@ -1484,10 +1567,17 @@ static int try_to_wake_up(struct task_st
+
+ if (this_sd->flags & SD_WAKE_AFFINE) {
+ unsigned long tl = this_load;
+ unsigned long tl_per_task;
+
++ /*
++ * Attract cache-cold tasks on sync wakeups:
++ */
++ if (sync && !task_hot(p, rq->clock, this_sd))
++ goto out_set_cpu;
++
++ schedstat_inc(p, se.nr_wakeups_affine_attempts);
+ tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+ /*
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+@@ -1503,10 +1593,11 @@ static int try_to_wake_up(struct task_st
+ * This domain has SD_WAKE_AFFINE and
+ * p is cache cold in this domain, and
+ * there is no bad imbalance.
+ */
+ schedstat_inc(this_sd, ttwu_move_affine);
++ schedstat_inc(p, se.nr_wakeups_affine);
+ goto out_set_cpu;
+ }
+ }
+
+ /*
+@@ -1514,10 +1605,11 @@ static int try_to_wake_up(struct task_st
+ * limit is reached.
+ */
+ if (this_sd->flags & SD_WAKE_BALANCE) {
+ if (imbalance*this_load <= 100*load) {
+ schedstat_inc(this_sd, ttwu_move_balance);
++ schedstat_inc(p, se.nr_wakeups_passive);
+ goto out_set_cpu;
+ }
+ }
+ }
+
+@@ -1539,22 +1631,22 @@ out_set_cpu:
+ cpu = task_cpu(p);
+ }
+
+ out_activate:
+ #endif /* CONFIG_SMP */
++ schedstat_inc(p, se.nr_wakeups);
++ if (sync)
++ schedstat_inc(p, se.nr_wakeups_sync);
++ if (orig_cpu != cpu)
++ schedstat_inc(p, se.nr_wakeups_migrate);
++ if (cpu == this_cpu)
++ schedstat_inc(p, se.nr_wakeups_local);
++ else
++ schedstat_inc(p, se.nr_wakeups_remote);
+ update_rq_clock(rq);
+ activate_task(rq, p, 1);
+- /*
+- * Sync wakeups (i.e. those types of wakeups where the waker
+- * has indicated that it will leave the CPU in short order)
+- * don't trigger a preemption, if the woken up task will run on
+- * this cpu. (in this case the 'I will reschedule' promise of
+- * the waker guarantees that the freshly woken up task is going
+- * to be considered on this CPU.)
+- */
+- if (!sync || cpu != this_cpu)
+- check_preempt_curr(rq, p);
++ check_preempt_curr(rq, p);
+ success = 1;
+
+ out_running:
+ p->state = TASK_RUNNING;
+ out:
+@@ -1581,32 +1673,24 @@ int fastcall wake_up_state(struct task_s
+ *
+ * __sched_fork() is basic setup used by init_idle() too:
+ */
+ static void __sched_fork(struct task_struct *p)
+ {
+- p->se.wait_start_fair = 0;
+ p->se.exec_start = 0;
+ p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
+- p->se.delta_exec = 0;
+- p->se.delta_fair_run = 0;
+- p->se.delta_fair_sleep = 0;
+- p->se.wait_runtime = 0;
+- p->se.sleep_start_fair = 0;
+
+ #ifdef CONFIG_SCHEDSTATS
+ p->se.wait_start = 0;
+- p->se.sum_wait_runtime = 0;
+ p->se.sum_sleep_runtime = 0;
+ p->se.sleep_start = 0;
+ p->se.block_start = 0;
+ p->se.sleep_max = 0;
+ p->se.block_max = 0;
+ p->se.exec_max = 0;
++ p->se.slice_max = 0;
+ p->se.wait_max = 0;
+- p->se.wait_runtime_overruns = 0;
+- p->se.wait_runtime_underruns = 0;
+ #endif
+
+ INIT_LIST_HEAD(&p->run_list);
+ p->se.on_rq = 0;
+
+@@ -1633,16 +1717,18 @@ void sched_fork(struct task_struct *p, i
+ __sched_fork(p);
+
+ #ifdef CONFIG_SMP
+ cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+ #endif
+- __set_task_cpu(p, cpu);
++ set_task_cpu(p, cpu);
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child:
+ */
+ p->prio = current->normal_prio;
++ if (!rt_prio(p->prio))
++ p->sched_class = &fair_sched_class;
+
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ if (likely(sched_info_on()))
+ memset(&p->sched_info, 0, sizeof(p->sched_info));
+ #endif
+@@ -1655,44 +1741,28 @@ void sched_fork(struct task_struct *p, i
+ #endif
+ put_cpu();
+ }
+
+ /*
+- * After fork, child runs first. (default) If set to 0 then
+- * parent will (try to) run first.
+- */
+-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
+-
+-/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+ {
+ unsigned long flags;
+ struct rq *rq;
+- int this_cpu;
+
+ rq = task_rq_lock(p, &flags);
+ BUG_ON(p->state != TASK_RUNNING);
+- this_cpu = smp_processor_id(); /* parent's CPU */
+ update_rq_clock(rq);
+
+ p->prio = effective_prio(p);
+
+- if (rt_prio(p->prio))
+- p->sched_class = &rt_sched_class;
+- else
+- p->sched_class = &fair_sched_class;
+-
+- if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+- (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+- !current->se.on_rq) {
+-
++ if (!p->sched_class->task_new || !current->se.on_rq) {
+ activate_task(rq, p, 0);
+ } else {
+ /*
+ * Let the scheduling class do new task startup
+ * management (if any):
+@@ -1793,15 +1863,15 @@ prepare_task_switch(struct rq *rq, struc
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+- * so, we finish that here outside of the runqueue lock. (Doing it
++ * so, we finish that here outside of the runqueue lock. (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
++static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+ __releases(rq->lock)
+ {
+ struct mm_struct *mm = rq->prev_mm;
+ long prev_state;
+
+@@ -1847,11 +1917,11 @@ asmlinkage void schedule_tail(struct tas
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
+ /* In this case, finish_task_switch does not reenable preemption */
+ preempt_enable();
+ #endif
+ if (current->set_child_tid)
+- put_user(current->pid, current->set_child_tid);
++ put_user(task_pid_vnr(current), current->set_child_tid);
+ }
+
+ /*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+@@ -1979,56 +2049,30 @@ unsigned long nr_active(void)
+ * Update rq->cpu_load[] statistics. This function is usually called every
+ * scheduler tick (TICK_NSEC).
+ */
+ static void update_cpu_load(struct rq *this_rq)
+ {
+- u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
+- unsigned long total_load = this_rq->ls.load.weight;
+- unsigned long this_load = total_load;
+- struct load_stat *ls = &this_rq->ls;
++ unsigned long this_load = this_rq->load.weight;
+ int i, scale;
+
+ this_rq->nr_load_updates++;
+- if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
+- goto do_avg;
+-
+- /* Update delta_fair/delta_exec fields first */
+- update_curr_load(this_rq);
+-
+- fair_delta64 = ls->delta_fair + 1;
+- ls->delta_fair = 0;
+-
+- exec_delta64 = ls->delta_exec + 1;
+- ls->delta_exec = 0;
+-
+- sample_interval64 = this_rq->clock - ls->load_update_last;
+- ls->load_update_last = this_rq->clock;
+-
+- if ((s64)sample_interval64 < (s64)TICK_NSEC)
+- sample_interval64 = TICK_NSEC;
+-
+- if (exec_delta64 > sample_interval64)
+- exec_delta64 = sample_interval64;
+-
+- idle_delta64 = sample_interval64 - exec_delta64;
+-
+- tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
+- tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
+-
+- this_load = (unsigned long)tmp64;
+-
+-do_avg:
+
+ /* Update our load: */
+ for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+ unsigned long old_load, new_load;
+
+ /* scale is effectively 1 << i now, and >> i divides by scale */
+
+ old_load = this_rq->cpu_load[i];
+ new_load = this_load;
+-
++ /*
++ * Round up the averaging division if load is increasing. This
++ * prevents us from getting stuck on 9 if the load is 10, for
++ * example.
++ */
++ if (new_load > old_load)
++ new_load += scale-1;
+ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+ }
+ }
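
The scale-1 adjustment matters because the decaying average is computed with integer shifts: at index 1, an old load of 9 against a steady new load of 10 yields (9*1 + 10) >> 1 = 9 forever without it. A standalone comparison of both variants (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long old_load = 9, this_load = 10;
        int i = 1, scale = 1 << i;
        unsigned long new_load = this_load;
        unsigned long no_round, rounded;

        no_round = (old_load * (scale - 1) + this_load) >> i;

        if (new_load > old_load)
                new_load += scale - 1;           /* round up while load rises */
        rounded = (old_load * (scale - 1) + new_load) >> i;

        printf("without rounding: %lu\n", no_round);   /* stays at 9 */
        printf("with rounding:    %lu\n", rounded);    /* reaches 10 */
        return 0;
}
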
+
+ #ifdef CONFIG_SMP
+@@ -2101,11 +2145,11 @@ static void double_lock_balance(struct r
+ }
+
+ /*
+ * If dest_cpu is allowed for this process, migrate the task to it.
+ * This is accomplished by forcing the cpu_allowed mask to only
+- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
++ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * the cpu_allowed mask is restored.
+ */
+ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+ {
+ struct migration_req req;
+@@ -2176,44 +2220,69 @@ int can_migrate_task(struct task_struct
+ * We do not migrate tasks that are:
+ * 1) running (obviously), or
+ * 2) cannot be migrated to this CPU due to cpus_allowed, or
+ * 3) are cache-hot on their current CPU.
+ */
+- if (!cpu_isset(this_cpu, p->cpus_allowed))
++ if (!cpu_isset(this_cpu, p->cpus_allowed)) {
++ schedstat_inc(p, se.nr_failed_migrations_affine);
+ return 0;
++ }
+ *all_pinned = 0;
+
+- if (task_running(rq, p))
++ if (task_running(rq, p)) {
++ schedstat_inc(p, se.nr_failed_migrations_running);
+ return 0;
++ }
++
++ /*
++ * Aggressive migration if:
++ * 1) task is cache cold, or
++ * 2) too many balance attempts have failed.
++ */
++
++ if (!task_hot(p, rq->clock, sd) ||
++ sd->nr_balance_failed > sd->cache_nice_tries) {
++#ifdef CONFIG_SCHEDSTATS
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(sd, lb_hot_gained[idle]);
++ schedstat_inc(p, se.nr_forced_migrations);
++ }
++#endif
++ return 1;
++ }
+
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(p, se.nr_failed_migrations_hot);
++ return 0;
++ }
+ return 1;
+ }
+
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator)
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator)
+ {
+- int pulled = 0, pinned = 0, skip_for_load;
++ int loops = 0, pulled = 0, pinned = 0, skip_for_load;
+ struct task_struct *p;
+ long rem_load_move = max_load_move;
+
+- if (max_nr_move == 0 || max_load_move == 0)
++ if (max_load_move == 0)
+ goto out;
+
+ pinned = 1;
+
+ /*
+ * Start the load-balancing iterator:
+ */
+ p = iterator->start(iterator->arg);
+ next:
+- if (!p)
++ if (!p || loops++ > sysctl_sched_nr_migrate)
+ goto out;
+ /*
+- * To help distribute high priority tasks accross CPUs we don't
++ * To help distribute high priority tasks across CPUs we don't
+ * skip a task if it will be the highest priority task (i.e. smallest
+ * prio value) on its new queue regardless of its load weight
+ */
+ skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
+ SCHED_LOAD_SCALE_FUZZ;
+@@ -2226,31 +2295,30 @@ next:
+ pull_task(busiest, p, this_rq, this_cpu);
+ pulled++;
+ rem_load_move -= p->se.load.weight;
+
+ /*
+- * We only want to steal up to the prescribed number of tasks
+- * and the prescribed amount of weighted load.
++ * We only want to steal up to the prescribed amount of weighted load.
+ */
+- if (pulled < max_nr_move && rem_load_move > 0) {
++ if (rem_load_move > 0) {
+ if (p->prio < *this_best_prio)
+ *this_best_prio = p->prio;
+ p = iterator->next(iterator->arg);
+ goto next;
+ }
+ out:
+ /*
+- * Right now, this is the only place pull_task() is called,
++ * Right now, this is one of only two places pull_task() is called,
+ * so we can safely collect pull_task() stats here rather than
+ * inside pull_task().
+ */
+ schedstat_add(sd, lb_gained[idle], pulled);
+
+ if (all_pinned)
+ *all_pinned = pinned;
+- *load_moved = max_load_move - rem_load_move;
+- return pulled;
++
++ return max_load_move - rem_load_move;
+ }
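
balance_tasks() now returns the weighted load actually moved and stops scanning after sysctl_sched_nr_migrate tasks, since the whole walk happens with IRQs disabled. A loose standalone analogue of that bounded, budgeted pull loop; it deliberately omits the skip_for_load priority heuristic and uses invented names:

#include <stdio.h>

#define NR_MIGRATE_LIMIT 32   /* mirrors sysctl_sched_nr_migrate */

/* Pull entries until either the weight budget or the scan limit is exhausted. */
static unsigned long pull_bounded(const unsigned long *weights, int n,
                                  unsigned long budget)
{
        unsigned long moved = 0;
        int loops = 0;

        for (int i = 0; i < n && loops++ < NR_MIGRATE_LIMIT; i++) {
                if (weights[i] > budget - moved)
                        continue;                /* would overshoot the budget */
                moved += weights[i];
                if (moved >= budget)
                        break;
        }
        return moved;            /* caller gets load moved, not a task count */
}

int main(void)
{
        unsigned long w[] = { 1024, 335, 820, 1024 };

        printf("moved %lu of 2048\n", pull_bounded(w, 4, 2048));   /* 1359 */
        return 0;
}
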
+
+ /*
+ * move_tasks tries to move up to max_load_move weighted load from busiest to
+ * this_rq, as part of a balancing operation within domain "sd".
+@@ -2261,42 +2329,65 @@ out:
+ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned)
+ {
+- struct sched_class *class = sched_class_highest;
++ const struct sched_class *class = sched_class_highest;
+ unsigned long total_load_moved = 0;
+ int this_best_prio = this_rq->curr->prio;
+
+ do {
+ total_load_moved +=
+ class->load_balance(this_rq, this_cpu, busiest,
+- ULONG_MAX, max_load_move - total_load_moved,
++ max_load_move - total_load_moved,
+ sd, idle, all_pinned, &this_best_prio);
+ class = class->next;
+ } while (class && max_load_move > total_load_moved);
+
+ return total_load_moved > 0;
+ }
+
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator)
++{
++ struct task_struct *p = iterator->start(iterator->arg);
++ int pinned = 0;
++
++ while (p) {
++ if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
++ pull_task(busiest, p, this_rq, this_cpu);
++ /*
++ * Right now, this is only the second place pull_task()
++ * is called, so we can safely collect pull_task()
++ * stats here rather than inside pull_task().
++ */
++ schedstat_inc(sd, lb_gained[idle]);
++
++ return 1;
++ }
++ p = iterator->next(iterator->arg);
++ }
++
++ return 0;
++}
++
+ /*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ struct sched_domain *sd, enum cpu_idle_type idle)
+ {
+- struct sched_class *class;
+- int this_best_prio = MAX_PRIO;
++ const struct sched_class *class;
+
+ for (class = sched_class_highest; class; class = class->next)
+- if (class->load_balance(this_rq, this_cpu, busiest,
+- 1, ULONG_MAX, sd, idle, NULL,
+- &this_best_prio))
++ if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
+ return 1;
+
+ return 0;
+ }
+
+@@ -2313,11 +2404,11 @@ find_busiest_group(struct sched_domain *
+ struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
+ unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+ unsigned long max_pull;
+ unsigned long busiest_load_per_task, busiest_nr_running;
+ unsigned long this_load_per_task, this_nr_running;
+- int load_idx;
++ int load_idx, group_imb = 0;
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance = 1;
+ unsigned long leader_nr_running = 0, min_load_per_task = 0;
+ unsigned long min_nr_running = ULONG_MAX;
+ struct sched_group *group_min = NULL, *group_leader = NULL;
+@@ -2332,23 +2423,26 @@ find_busiest_group(struct sched_domain *
+ load_idx = sd->newidle_idx;
+ else
+ load_idx = sd->idle_idx;
+
+ do {
+- unsigned long load, group_capacity;
++ unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
+ int local_group;
+ int i;
++ int __group_imb = 0;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+ unsigned long sum_nr_running, sum_weighted_load;
+
+ local_group = cpu_isset(this_cpu, group->cpumask);
+
+ if (local_group)
+ balance_cpu = first_cpu(group->cpumask);
+
+ /* Tally up the load of all CPUs in the group */
+ sum_weighted_load = sum_nr_running = avg_load = 0;
++ max_cpu_load = 0;
++ min_cpu_load = ~0UL;
+
+ for_each_cpu_mask(i, group->cpumask) {
+ struct rq *rq;
+
+ if (!cpu_isset(i, *cpus))
+@@ -2365,12 +2459,17 @@ find_busiest_group(struct sched_domain *
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
+ load = target_load(i, load_idx);
+- } else
++ } else {
+ load = source_load(i, load_idx);
++ if (load > max_cpu_load)
++ max_cpu_load = load;
++ if (min_cpu_load > load)
++ min_cpu_load = load;
++ }
+
+ avg_load += load;
+ sum_nr_running += rq->nr_running;
+ sum_weighted_load += weighted_cpuload(i);
+ }
+@@ -2392,23 +2491,27 @@ find_busiest_group(struct sched_domain *
+
+ /* Adjust by relative CPU power of the group */
+ avg_load = sg_div_cpu_power(group,
+ avg_load * SCHED_LOAD_SCALE);
+
++ if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
++ __group_imb = 1;
++
+ group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+
+ if (local_group) {
+ this_load = avg_load;
+ this = group;
+ this_nr_running = sum_nr_running;
+ this_load_per_task = sum_weighted_load;
+ } else if (avg_load > max_load &&
+- sum_nr_running > group_capacity) {
++ (sum_nr_running > group_capacity || __group_imb)) {
+ max_load = avg_load;
+ busiest = group;
+ busiest_nr_running = sum_nr_running;
+ busiest_load_per_task = sum_weighted_load;
++ group_imb = __group_imb;
+ }
+
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /*
+ * Busy processors will not participate in power savings
+@@ -2476,19 +2579,22 @@ group_next:
+ if (this_load >= avg_load ||
+ 100*max_load <= sd->imbalance_pct*this_load)
+ goto out_balanced;
+
+ busiest_load_per_task /= busiest_nr_running;
++ if (group_imb)
++ busiest_load_per_task = min(busiest_load_per_task, avg_load);
++
+ /*
+ * We're trying to get all the cpus to the average_load, so we don't
+ * want to push ourselves above the average load, nor do we wish to
+ * reduce the max loaded cpu below the average load, as either of these
+ * actions would just result in more rebalancing later, and ping-pong
+ * tasks around. Thus we look for the minimum possible imbalance.
+ * Negative imbalances (*we* are more loaded than anyone else) will
+ * be counted as no imbalance for these purposes -- we can't fix that
+- * by pulling tasks to us. Be careful of negative numbers as they'll
++ * by pulling tasks to us. Be careful of negative numbers as they'll
+ * appear as very large values with unsigned longs.
+ */
+ if (max_load <= busiest_load_per_task)
+ goto out_balanced;
+
+@@ -2650,11 +2756,11 @@ static int load_balance(int this_cpu, st
+ */
+ if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+ sd_idle = 1;
+
+- schedstat_inc(sd, lb_cnt[idle]);
++ schedstat_inc(sd, lb_count[idle]);
+
+ redo:
+ group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+ &cpus, balance);
+
+@@ -2803,11 +2909,11 @@ load_balance_newidle(int this_cpu, struc
+ */
+ if (sd->flags & SD_SHARE_CPUPOWER &&
+ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+ sd_idle = 1;
+
+- schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
++ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
+ redo:
+ group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
+ &sd_idle, &cpus, NULL);
+ if (!group) {
+ schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
+@@ -2919,11 +3025,11 @@ static void active_load_balance(struct r
+
+ target_rq = cpu_rq(target_cpu);
+
+ /*
+ * This condition is "impossible", if it occurs
+- * we need to fix it. Originally reported by
++ * we need to fix it. Originally reported by
+ * Bjorn Helgaas on a 128-cpu setup.
+ */
+ BUG_ON(busiest_rq == target_rq);
+
+ /* move a task from busiest_rq to target_rq */
+@@ -2937,11 +3043,11 @@ static void active_load_balance(struct r
+ cpu_isset(busiest_cpu, sd->span))
+ break;
+ }
+
+ if (likely(sd)) {
+- schedstat_inc(sd, alb_cnt);
++ schedstat_inc(sd, alb_count);
+
+ if (move_one_task(target_rq, target_cpu, busiest_rq,
+ sd, CPU_IDLE))
+ schedstat_inc(sd, alb_pushed);
+ else
+@@ -2951,11 +3057,11 @@ static void active_load_balance(struct r
+ }
+
+ #ifdef CONFIG_NO_HZ
+ static struct {
+ atomic_t load_balancer;
+- cpumask_t cpu_mask;
++ cpumask_t cpu_mask;
+ } nohz ____cacheline_aligned = {
+ .load_balancer = ATOMIC_INIT(-1),
+ .cpu_mask = CPU_MASK_NONE,
+ };
+
+@@ -3030,11 +3136,11 @@ static DEFINE_SPINLOCK(balancing);
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
++static void rebalance_domains(int cpu, enum cpu_idle_type idle)
+ {
+ int balance = 1;
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long interval;
+ struct sched_domain *sd;
+@@ -3214,22 +3320,10 @@ static inline void trigger_load_balance(
+ */
+ static inline void idle_balance(int cpu, struct rq *rq)
+ {
+ }
+
+-/* Avoid "used but not defined" warning on UP */
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator)
+-{
+- *load_moved = 0;
+-
+- return 0;
+-}
+-
+ #endif
+
+ DEFINE_PER_CPU(struct kernel_stat, kstat);
+
+ EXPORT_PER_CPU_SYMBOL(kstat);
+@@ -3244,11 +3338,11 @@ unsigned long long task_sched_runtime(st
+ u64 ns, delta_exec;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &flags);
+ ns = p->se.sum_exec_runtime;
+- if (rq->curr == p) {
++ if (task_current(rq, p)) {
+ update_rq_clock(rq);
+ delta_exec = rq->clock - p->se.exec_start;
+ if ((s64)delta_exec > 0)
+ ns += delta_exec;
+ }
+@@ -3258,11 +3352,10 @@ unsigned long long task_sched_runtime(st
+ }
+
+ /*
+ * Account user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+- * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in user space since the last update
+ */
+ void account_user_time(struct task_struct *p, cputime_t cputime)
+ {
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+@@ -3277,10 +3370,39 @@ void account_user_time(struct task_struc
+ else
+ cpustat->user = cputime64_add(cpustat->user, tmp);
+ }
+
+ /*
++ * Account guest cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in virtual machine since the last update
++ */
++static void account_guest_time(struct task_struct *p, cputime_t cputime)
++{
++ cputime64_t tmp;
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++
++ tmp = cputime_to_cputime64(cputime);
++
++ p->utime = cputime_add(p->utime, cputime);
++ p->gtime = cputime_add(p->gtime, cputime);
++
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++ cpustat->guest = cputime64_add(cpustat->guest, tmp);
++}
++
++/*
++ * Account scaled user cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in user space since the last update
++ */
++void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->utimescaled = cputime_add(p->utimescaled, cputime);
++}
++
++/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+@@ -3289,10 +3411,13 @@ void account_system_time(struct task_str
+ {
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct rq *rq = this_rq();
+ cputime64_t tmp;
+
++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
++ return account_guest_time(p, cputime);
++
+ p->stime = cputime_add(p->stime, cputime);
+
+ /* Add system time to cpustat. */
+ tmp = cputime_to_cputime64(cputime);
+ if (hardirq_count() - hardirq_offset)
+@@ -3308,10 +3433,21 @@ void account_system_time(struct task_str
+ /* Account for system time used */
+ acct_update_integrals(p);
+ }
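
With the PF_VCPU test added above, ticks a vcpu thread spends outside interrupt context are charged to utime/gtime and the user/guest cpustat buckets instead of stime. A compressed standalone model of that branching, with plain integers standing in for cputime_t:

#include <stdio.h>

struct acct {
        unsigned long utime, stime, gtime;           /* per-task counters */
        unsigned long cs_user, cs_guest, cs_system;  /* per-cpu cpustat */
};

static void account_tick(struct acct *a, int pf_vcpu, int in_irq)
{
        if (pf_vcpu && !in_irq) {        /* guest path: utime + gtime */
                a->utime++; a->gtime++;
                a->cs_user++; a->cs_guest++;
                return;
        }
        a->stime++;                      /* simplified: no irq/softirq split */
        a->cs_system++;
}

int main(void)
{
        struct acct a = { 0 };

        account_tick(&a, 1, 0);   /* vcpu thread, not in an interrupt */
        account_tick(&a, 0, 0);   /* ordinary kernel time */
        printf("gtime=%lu stime=%lu\n", a.gtime, a.stime);   /* 1 and 1 */
        return 0;
}
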
+
+ /*
++ * Account scaled system cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ */
++void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->stimescaled = cputime_add(p->stimescaled, cputime);
++}
++
++/*
+ * Account for involuntary wait time.
+ * @p: the process from which the cpu time has been stolen
+ * @steal: the cpu time spent in involuntary wait
+ */
+ void account_steal_time(struct task_struct *p, cputime_t steal)
+@@ -3404,43 +3540,56 @@ EXPORT_SYMBOL(sub_preempt_count);
+ /*
+ * Print scheduling while atomic bug:
+ */
+ static noinline void __schedule_bug(struct task_struct *prev)
+ {
+- printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
+- prev->comm, preempt_count(), prev->pid);
++ struct pt_regs *regs = get_irq_regs();
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
+ debug_show_held_locks(prev);
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
+- dump_stack();
++
++ if (regs)
++ show_regs(regs);
++ else
++ dump_stack();
+ }
+
+ /*
+ * Various schedule()-time debugging checks and statistics:
+ */
+ static inline void schedule_debug(struct task_struct *prev)
+ {
+ /*
+- * Test if we are atomic. Since do_exit() needs to call into
++ * Test if we are atomic. Since do_exit() needs to call into
+ * schedule() atomically, we ignore that path for now.
+ * Otherwise, whine if we are scheduling when we should not be.
+ */
+ if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+ __schedule_bug(prev);
+
+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+- schedstat_inc(this_rq(), sched_cnt);
++ schedstat_inc(this_rq(), sched_count);
++#ifdef CONFIG_SCHEDSTATS
++ if (unlikely(prev->lock_depth >= 0)) {
++ schedstat_inc(this_rq(), bkl_count);
++ schedstat_inc(prev, sched_info.bkl_count);
++ }
++#endif
+ }
+
+ /*
+ * Pick up the highest-prio task:
+ */
+ static inline struct task_struct *
+ pick_next_task(struct rq *rq, struct task_struct *prev)
+ {
+- struct sched_class *class;
++ const struct sched_class *class;
+ struct task_struct *p;
+
+ /*
+ * Optimization: we know that if all tasks are in
+ * the fair class we can call that function directly:
+@@ -3485,13 +3634,17 @@ need_resched:
+ release_kernel_lock(prev);
+ need_resched_nonpreemptible:
+
+ schedule_debug(prev);
+
+- spin_lock_irq(&rq->lock);
+- clear_tsk_need_resched(prev);
++ /*
++ * Do the rq-clock update outside the rq lock:
++ */
++ local_irq_disable();
+ __update_rq_clock(rq);
++ spin_lock(&rq->lock);
++ clear_tsk_need_resched(prev);
+
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev)))) {
+ prev->state = TASK_RUNNING;
+@@ -3530,11 +3683,11 @@ need_resched_nonpreemptible:
+ EXPORT_SYMBOL(schedule);
+
+ #ifdef CONFIG_PREEMPT
+ /*
+ * this is the entry point to schedule() from in-kernel preemption
+- * off of preempt_enable. Kernel preemptions off return from interrupt
++ * off of preempt_enable. Kernel preemptions off return from interrupt
+ * occur there and call schedule directly.
+ */
+ asmlinkage void __sched preempt_schedule(void)
+ {
+ struct thread_info *ti = current_thread_info();
+@@ -3542,36 +3695,39 @@ asmlinkage void __sched preempt_schedule
+ struct task_struct *task = current;
+ int saved_lock_depth;
+ #endif
+ /*
+ * If there is a non-zero preempt_count or interrupts are disabled,
+- * we do not want to preempt the current task. Just return..
++ * we do not want to preempt the current task. Just return..
+ */
+ if (likely(ti->preempt_count || irqs_disabled()))
+ return;
+
+-need_resched:
+- add_preempt_count(PREEMPT_ACTIVE);
+- /*
+- * We keep the big kernel semaphore locked, but we
+- * clear ->lock_depth so that schedule() doesnt
+- * auto-release the semaphore:
+- */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesn't
++ * auto-release the semaphore:
++ */
+ #ifdef CONFIG_PREEMPT_BKL
+- saved_lock_depth = task->lock_depth;
+- task->lock_depth = -1;
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
+ #endif
+- schedule();
++ schedule();
+ #ifdef CONFIG_PREEMPT_BKL
+- task->lock_depth = saved_lock_depth;
++ task->lock_depth = saved_lock_depth;
+ #endif
+- sub_preempt_count(PREEMPT_ACTIVE);
++ sub_preempt_count(PREEMPT_ACTIVE);
+
+- /* we could miss a preemption opportunity between schedule and now */
+- barrier();
+- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+- goto need_resched;
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ }
+ EXPORT_SYMBOL(preempt_schedule);
+
+ /*
+ * this is the entry point to schedule() from kernel preemption
+@@ -3587,33 +3743,36 @@ asmlinkage void __sched preempt_schedule
+ int saved_lock_depth;
+ #endif
+ /* Catch callers which need to be fixed */
+ BUG_ON(ti->preempt_count || !irqs_disabled());
+
+-need_resched:
+- add_preempt_count(PREEMPT_ACTIVE);
+- /*
+- * We keep the big kernel semaphore locked, but we
+- * clear ->lock_depth so that schedule() doesnt
+- * auto-release the semaphore:
+- */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesn't
++ * auto-release the semaphore:
++ */
+ #ifdef CONFIG_PREEMPT_BKL
+- saved_lock_depth = task->lock_depth;
+- task->lock_depth = -1;
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
+ #endif
+- local_irq_enable();
+- schedule();
+- local_irq_disable();
++ local_irq_enable();
++ schedule();
++ local_irq_disable();
+ #ifdef CONFIG_PREEMPT_BKL
+- task->lock_depth = saved_lock_depth;
++ task->lock_depth = saved_lock_depth;
+ #endif
+- sub_preempt_count(PREEMPT_ACTIVE);
++ sub_preempt_count(PREEMPT_ACTIVE);
+
+- /* we could miss a preemption opportunity between schedule and now */
+- barrier();
+- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+- goto need_resched;
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ }
+
+ #endif /* CONFIG_PREEMPT */
+
+ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+@@ -3622,25 +3781,24 @@ int default_wake_function(wait_queue_t *
+ return try_to_wake_up(curr->private, mode, sync);
+ }
+ EXPORT_SYMBOL(default_wake_function);
+
+ /*
+- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
++ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
+ *
+ * There are circumstances in which we can try to wake a task which has already
+- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
+ */
+ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, int sync, void *key)
+ {
+- struct list_head *tmp, *next;
++ wait_queue_t *curr, *next;
+
+- list_for_each_safe(tmp, next, &q->task_list) {
+- wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
++ list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+ unsigned flags = curr->flags;
+
+ if (curr->func(curr, mode, sync, key) &&
+ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+ break;
+@@ -3702,11 +3860,11 @@ __wake_up_sync(wait_queue_head_t *q, uns
+ __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+ spin_unlock_irqrestore(&q->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+
+-void fastcall complete(struct completion *x)
++void complete(struct completion *x)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+@@ -3714,11 +3872,11 @@ void fastcall complete(struct completion
+ 1, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete);
+
+-void fastcall complete_all(struct completion *x)
++void complete_all(struct completion *x)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done += UINT_MAX/2;
+@@ -3726,210 +3884,123 @@ void fastcall complete_all(struct comple
+ 0, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete_all);
+
+-void fastcall __sched wait_for_completion(struct completion *x)
+-{
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
+-
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- __set_current_state(TASK_UNINTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- schedule();
+- spin_lock_irq(&x->wait.lock);
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+- spin_unlock_irq(&x->wait.lock);
+-}
+-EXPORT_SYMBOL(wait_for_completion);
+-
+-unsigned long fastcall __sched
+-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++static inline long __sched
++do_wait_for_common(struct completion *x, long timeout, int state)
+ {
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+ if (!x->done) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ wait.flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue_tail(&x->wait, &wait);
+ do {
+- __set_current_state(TASK_UNINTERRUPTIBLE);
++ if (state == TASK_INTERRUPTIBLE &&
++ signal_pending(current)) {
++ __remove_wait_queue(&x->wait, &wait);
++ return -ERESTARTSYS;
++ }
++ __set_current_state(state);
+ spin_unlock_irq(&x->wait.lock);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irq(&x->wait.lock);
+ if (!timeout) {
+ __remove_wait_queue(&x->wait, &wait);
+- goto out;
++ return timeout;
+ }
+ } while (!x->done);
+ __remove_wait_queue(&x->wait, &wait);
+ }
+ x->done--;
+-out:
+- spin_unlock_irq(&x->wait.lock);
+ return timeout;
+ }
+-EXPORT_SYMBOL(wait_for_completion_timeout);
+
+-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
++static long __sched
++wait_for_common(struct completion *x, long timeout, int state)
+ {
+- int ret = 0;
+-
+ might_sleep();
+
+ spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
+-
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- if (signal_pending(current)) {
+- ret = -ERESTARTSYS;
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- __set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- schedule();
+- spin_lock_irq(&x->wait.lock);
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+-out:
++ timeout = do_wait_for_common(x, timeout, state);
+ spin_unlock_irq(&x->wait.lock);
+-
+- return ret;
++ return timeout;
+ }
+-EXPORT_SYMBOL(wait_for_completion_interruptible);
+
+-unsigned long fastcall __sched
+-wait_for_completion_interruptible_timeout(struct completion *x,
+- unsigned long timeout)
++void __sched wait_for_completion(struct completion *x)
+ {
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
++ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(wait_for_completion);
+
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- if (signal_pending(current)) {
+- timeout = -ERESTARTSYS;
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- __set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- timeout = schedule_timeout(timeout);
+- spin_lock_irq(&x->wait.lock);
+- if (!timeout) {
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+-out:
+- spin_unlock_irq(&x->wait.lock);
+- return timeout;
++unsigned long __sched
++wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++{
++ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
+ }
+-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
++EXPORT_SYMBOL(wait_for_completion_timeout);
+
+-static inline void
+-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
++int __sched wait_for_completion_interruptible(struct completion *x)
+ {
+- spin_lock_irqsave(&q->lock, *flags);
+- __add_wait_queue(q, wait);
+- spin_unlock(&q->lock);
++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
++ if (t == -ERESTARTSYS)
++ return t;
++ return 0;
+ }
++EXPORT_SYMBOL(wait_for_completion_interruptible);
+
+-static inline void
+-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
++unsigned long __sched
++wait_for_completion_interruptible_timeout(struct completion *x,
++ unsigned long timeout)
+ {
+- spin_lock_irq(&q->lock);
+- __remove_wait_queue(q, wait);
+- spin_unlock_irqrestore(&q->lock, *flags);
++ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
+ }
++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+
+-void __sched interruptible_sleep_on(wait_queue_head_t *q)
++static long __sched
++sleep_on_common(wait_queue_head_t *q, int state, long timeout)
+ {
+ unsigned long flags;
+ wait_queue_t wait;
+
+ init_waitqueue_entry(&wait, current);
+
+- current->state = TASK_INTERRUPTIBLE;
++ __set_current_state(state);
+
+- sleep_on_head(q, &wait, &flags);
+- schedule();
+- sleep_on_tail(q, &wait, &flags);
++ spin_lock_irqsave(&q->lock, flags);
++ __add_wait_queue(q, &wait);
++ spin_unlock(&q->lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&q->lock);
++ __remove_wait_queue(q, &wait);
++ spin_unlock_irqrestore(&q->lock, flags);
++
++ return timeout;
++}
++
++void __sched interruptible_sleep_on(wait_queue_head_t *q)
++{
++ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+ EXPORT_SYMBOL(interruptible_sleep_on);
+
+ long __sched
+ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_INTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- timeout = schedule_timeout(timeout);
+- sleep_on_tail(q, &wait, &flags);
+-
+- return timeout;
++ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
+ }
+ EXPORT_SYMBOL(interruptible_sleep_on_timeout);
+
+ void __sched sleep_on(wait_queue_head_t *q)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_UNINTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- schedule();
+- sleep_on_tail(q, &wait, &flags);
++ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+ EXPORT_SYMBOL(sleep_on);
+
+ long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_UNINTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- timeout = schedule_timeout(timeout);
+- sleep_on_tail(q, &wait, &flags);
+-
+- return timeout;
++ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
+ }
+ EXPORT_SYMBOL(sleep_on_timeout);
+
+ #ifdef CONFIG_RT_MUTEXES
+
+@@ -3944,38 +4015,44 @@ EXPORT_SYMBOL(sleep_on_timeout);
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+ void rt_mutex_setprio(struct task_struct *p, int prio)
+ {
+ unsigned long flags;
+- int oldprio, on_rq;
++ int oldprio, on_rq, running;
+ struct rq *rq;
+
+ BUG_ON(prio < 0 || prio > MAX_PRIO);
+
+ rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+
+ oldprio = p->prio;
+ on_rq = p->se.on_rq;
+- if (on_rq)
++ running = task_current(rq, p);
++ if (on_rq) {
+ dequeue_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++ }
+
+ if (rt_prio(prio))
+ p->sched_class = &rt_sched_class;
+ else
+ p->sched_class = &fair_sched_class;
+
+ p->prio = prio;
+
+ if (on_rq) {
++ if (running)
++ p->sched_class->set_curr_task(rq);
+ enqueue_task(rq, p, 0);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+- if (task_running(rq, p)) {
++ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else {
+ check_preempt_curr(rq, p);
+ }
+@@ -4135,13 +4212,13 @@ struct task_struct *idle_task(int cpu)
+
+ /**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ */
+-static inline struct task_struct *find_process_by_pid(pid_t pid)
++static struct task_struct *find_process_by_pid(pid_t pid)
+ {
+- return pid ? find_task_by_pid(pid) : current;
++ return pid ? find_task_by_vpid(pid) : current;
+ }
+
+ /* Actually do priority change: must hold rq lock. */
+ static void
+ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+@@ -4177,11 +4254,11 @@ __setscheduler(struct rq *rq, struct tas
+ * NOTE that the task may be already dead.
+ */
+ int sched_setscheduler(struct task_struct *p, int policy,
+ struct sched_param *param)
+ {
+- int retval, oldprio, oldpolicy = -1, on_rq;
++ int retval, oldprio, oldpolicy = -1, on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+
+ /* may grab non-irq protected spin_locks */
+ BUG_ON(in_interrupt());
+@@ -4259,22 +4336,30 @@ recheck:
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+ goto recheck;
+ }
+ update_rq_clock(rq);
+ on_rq = p->se.on_rq;
+- if (on_rq)
++ running = task_current(rq, p);
++ if (on_rq) {
+ deactivate_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++ }
++
+ oldprio = p->prio;
+ __setscheduler(rq, p, policy, param->sched_priority);
++
+ if (on_rq) {
++ if (running)
++ p->sched_class->set_curr_task(rq);
+ activate_task(rq, p, 0);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+- if (task_running(rq, p)) {
++ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else {
+ check_preempt_curr(rq, p);
+ }
+@@ -4314,12 +4399,12 @@ do_sched_setscheduler(pid_t pid, int pol
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ */
+-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+- struct sched_param __user *param)
++asmlinkage long
++sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+ {
+ /* negative values for policy are not valid */
+ if (policy < 0)
+ return -EINVAL;
+
+@@ -4341,26 +4426,24 @@ asmlinkage long sys_sched_setparam(pid_t
+ * @pid: the pid in question.
+ */
+ asmlinkage long sys_sched_getscheduler(pid_t pid)
+ {
+ struct task_struct *p;
+- int retval = -EINVAL;
++ int retval;
+
+ if (pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ retval = -ESRCH;
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ if (p) {
+ retval = security_task_getscheduler(p);
+ if (!retval)
+ retval = p->policy;
+ }
+ read_unlock(&tasklist_lock);
+-
+-out_nounlock:
+ return retval;
+ }
+
+ /**
+ * sys_sched_getscheduler - get the RT priority of a thread
+@@ -4369,14 +4452,14 @@ out_nounlock:
+ */
+ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+ {
+ struct sched_param lp;
+ struct task_struct *p;
+- int retval = -EINVAL;
++ int retval;
+
+ if (!param || pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+@@ -4392,11 +4475,10 @@ asmlinkage long sys_sched_getparam(pid_t
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+-out_nounlock:
+ return retval;
+
+ out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
+@@ -4418,11 +4500,11 @@ long sched_setaffinity(pid_t pid, cpumas
+ return -ESRCH;
+ }
+
+ /*
+ * It is not safe to call set_cpus_allowed with the
+- * tasklist_lock held. We will bump the task_struct's
++ * tasklist_lock held. We will bump the task_struct's
+ * usage count and then drop tasklist_lock.
+ */
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
+@@ -4435,12 +4517,25 @@ long sched_setaffinity(pid_t pid, cpumas
+ if (retval)
+ goto out_unlock;
+
+ cpus_allowed = cpuset_cpus_allowed(p);
+ cpus_and(new_mask, new_mask, cpus_allowed);
++ again:
+ retval = set_cpus_allowed(p, new_mask);
+
++ if (!retval) {
++ cpus_allowed = cpuset_cpus_allowed(p);
++ if (!cpus_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ new_mask = cpus_allowed;
++ goto again;
++ }
++ }
+ out_unlock:
+ put_task_struct(p);
+ mutex_unlock(&sched_hotcpu_mutex);
+ return retval;
+ }
+@@ -4552,12 +4647,12 @@ asmlinkage long sys_sched_getaffinity(pi
+ */
+ asmlinkage long sys_sched_yield(void)
+ {
+ struct rq *rq = this_rq_lock();
+
+- schedstat_inc(rq, yld_cnt);
+- current->sched_class->yield_task(rq, current);
++ schedstat_inc(rq, yld_count);
++ current->sched_class->yield_task(rq);
+
+ /*
+ * Since we are going to call schedule() anyway, there's
+ * no need to preempt or enable interrupts:
+ */
+@@ -4601,11 +4696,11 @@ EXPORT_SYMBOL(cond_resched);
+
+ /*
+ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+ int cond_resched_lock(spinlock_t *lock)
+ {
+@@ -4655,11 +4750,11 @@ void __sched yield(void)
+ sys_sched_yield();
+ }
+ EXPORT_SYMBOL(yield);
+
+ /*
+- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+@@ -4747,15 +4842,16 @@ asmlinkage long sys_sched_get_priority_m
+ */
+ asmlinkage
+ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+ {
+ struct task_struct *p;
+- int retval = -EINVAL;
++ unsigned int time_slice;
++ int retval;
+ struct timespec t;
+
+ if (pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ retval = -ESRCH;
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ if (!p)
+@@ -4763,16 +4859,32 @@ long sys_sched_rr_get_interval(pid_t pid
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+- jiffies_to_timespec(p->policy == SCHED_FIFO ?
+- 0 : static_prio_timeslice(p->static_prio), &t);
++ /*
++ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
++ * tasks that are on an otherwise idle runqueue:
++ */
++ time_slice = 0;
++ if (p->policy == SCHED_RR) {
++ time_slice = DEF_TIMESLICE;
++ } else {
++ struct sched_entity *se = &p->se;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ if (rq->cfs.load.weight)
++ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
++ task_rq_unlock(rq, &flags);
++ }
+ read_unlock(&tasklist_lock);
++ jiffies_to_timespec(time_slice, &t);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+-out_nounlock:
+ return retval;
++
+ out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
+ }
+
+@@ -4782,32 +4894,33 @@ static void show_task(struct task_struct
+ {
+ unsigned long free = 0;
+ unsigned state;
+
+ state = p->state ? __ffs(p->state) + 1 : 0;
+- printk("%-13.13s %c", p->comm,
++ printk(KERN_INFO "%-13.13s %c", p->comm,
+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
+ #if BITS_PER_LONG == 32
+ if (state == TASK_RUNNING)
+- printk(" running ");
++ printk(KERN_CONT " running ");
+ else
+- printk(" %08lx ", thread_saved_pc(p));
++ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+ #else
+ if (state == TASK_RUNNING)
+- printk(" running task ");
++ printk(KERN_CONT " running task ");
+ else
+- printk(" %016lx ", thread_saved_pc(p));
++ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+ #endif
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+ {
+ unsigned long *n = end_of_stack(p);
+ while (!*n)
+ n++;
+ free = (unsigned long)n - (unsigned long)end_of_stack(p);
+ }
+ #endif
+- printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
++ printk(KERN_CONT "%5lu %5d %6d\n", free,
++ task_pid_nr(p), task_pid_nr(p->parent));
+
+ if (state != TASK_RUNNING)
+ show_stack(p, NULL);
+ }
+
+@@ -4909,22 +5022,22 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+ static inline void sched_init_granularity(void)
+ {
+ unsigned int factor = 1 + ilog2(num_online_cpus());
+- const unsigned long limit = 100000000;
++ const unsigned long limit = 200000000;
+
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
+
+ sysctl_sched_latency *= factor;
+ if (sysctl_sched_latency > limit)
+ sysctl_sched_latency = limit;
+
+- sysctl_sched_runtime_limit = sysctl_sched_latency;
+- sysctl_sched_wakeup_granularity = sysctl_sched_min_granularity / 2;
++ sysctl_sched_wakeup_granularity *= factor;
++ sysctl_sched_batch_wakeup_granularity *= factor;
+ }
+
+ #ifdef CONFIG_SMP
+ /*
+ * This is how migration works:
+@@ -4946,11 +5059,11 @@ static inline void sched_init_granularit
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+- * task must not exit() & deallocate itself prematurely. The
++ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+ {
+ struct migration_req req;
+@@ -4983,11 +5096,11 @@ out:
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed);
+
+ /*
+- * Move (not current) task off this cpu, onto dest cpu. We're doing
++ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
+ *
+ * So we race with normal scheduler movements, but that's OK, as long
+@@ -5045,10 +5158,12 @@ static int migration_thread(void *data)
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ struct migration_req *req;
+ struct list_head *head;
+
++ try_to_freeze();
++
+ spin_lock_irq(&rq->lock);
+
+ if (cpu_is_offline(cpu)) {
+ spin_unlock_irq(&rq->lock);
+ goto wait_to_die;
+@@ -5089,50 +5204,69 @@ wait_to_die:
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
++
++static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ int ret;
++
++ local_irq_disable();
++ ret = __migrate_task(p, src_cpu, dest_cpu);
++ local_irq_enable();
++ return ret;
++}
++
+ /*
+- * Figure out where task on dead CPU should go, use force if neccessary.
++ * Figure out where task on dead CPU should go, use force if necessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
+ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+ {
+ unsigned long flags;
+ cpumask_t mask;
+ struct rq *rq;
+ int dest_cpu;
+
+-restart:
+- /* On same node? */
+- mask = node_to_cpumask(cpu_to_node(dead_cpu));
+- cpus_and(mask, mask, p->cpus_allowed);
+- dest_cpu = any_online_cpu(mask);
+-
+- /* On any allowed CPU? */
+- if (dest_cpu == NR_CPUS)
+- dest_cpu = any_online_cpu(p->cpus_allowed);
++ do {
++ /* On same node? */
++ mask = node_to_cpumask(cpu_to_node(dead_cpu));
++ cpus_and(mask, mask, p->cpus_allowed);
++ dest_cpu = any_online_cpu(mask);
++
++ /* On any allowed CPU? */
++ if (dest_cpu == NR_CPUS)
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++
++ /* No more Mr. Nice Guy. */
++ if (dest_cpu == NR_CPUS) {
++ cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
++ /*
++ * Try to stay on the same cpuset, where the
++ * current cpuset may be a subset of all cpus.
++ * The cpuset_cpus_allowed_locked() variant of
++ * cpuset_cpus_allowed() will not block. It must be
++ * called within calls to cpuset_lock/cpuset_unlock.
++ */
++ rq = task_rq_lock(p, &flags);
++ p->cpus_allowed = cpus_allowed;
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++ task_rq_unlock(rq, &flags);
+
+- /* No more Mr. Nice Guy. */
+- if (dest_cpu == NR_CPUS) {
+- rq = task_rq_lock(p, &flags);
+- cpus_setall(p->cpus_allowed);
+- dest_cpu = any_online_cpu(p->cpus_allowed);
+- task_rq_unlock(rq, &flags);
+-
+- /*
+- * Don't tell them about moving exiting tasks or
+- * kernel threads (both mm NULL), since they never
+- * leave kernel.
+- */
+- if (p->mm && printk_ratelimit())
+- printk(KERN_INFO "process %d (%s) no "
+- "longer affine to cpu%d\n",
+- p->pid, p->comm, dead_cpu);
+- }
+- if (!__migrate_task(p, dead_cpu, dest_cpu))
+- goto restart;
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++ * leave kernel.
++ */
++ if (p->mm && printk_ratelimit()) {
++ printk(KERN_INFO "process %d (%s) no "
++ "longer affine to cpu%d\n",
++ task_pid_nr(p), p->comm, dead_cpu);
++ }
++ }
++ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
+ }
+
+ /*
+ * While a dead CPU has no uninterruptible tasks queued at this point,
+ * it might still have a nonzero ->nr_uninterruptible counter, because
+@@ -5156,27 +5290,27 @@ static void migrate_nr_uninterruptible(s
+ /* Run through task list and migrate tasks from the dead cpu. */
+ static void migrate_live_tasks(int src_cpu)
+ {
+ struct task_struct *p, *t;
+
+- write_lock_irq(&tasklist_lock);
++ read_lock(&tasklist_lock);
+
+ do_each_thread(t, p) {
+ if (p == current)
+ continue;
+
+ if (task_cpu(p) == src_cpu)
+ move_task_off_dead_cpu(src_cpu, p);
+ } while_each_thread(t, p);
+
+- write_unlock_irq(&tasklist_lock);
++ read_unlock(&tasklist_lock);
+ }
+
+ /*
+ * Schedules idle task to be the next runnable task on current CPU.
+- * It does so by boosting its priority to highest possible and adding it to
+- * the _front_ of the runqueue. Used by CPU offline code.
++ * It does so by boosting its priority to highest possible.
++ * Used by CPU offline code.
+ */
+ void sched_idle_next(void)
+ {
+ int this_cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(this_cpu);
+@@ -5192,12 +5326,12 @@ void sched_idle_next(void)
+ */
+ spin_lock_irqsave(&rq->lock, flags);
+
+ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
+
+- /* Add idle task to the _front_ of its priority queue: */
+- activate_idle_task(p, rq);
++ update_rq_clock(rq);
++ activate_task(rq, p, 0);
+
+ spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ /*
+@@ -5219,26 +5353,25 @@ void idle_task_exit(void)
+ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
+ {
+ struct rq *rq = cpu_rq(dead_cpu);
+
+ /* Must be exiting, otherwise would be on tasklist. */
+- BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
++ BUG_ON(!p->exit_state);
+
+ /* Cannot have done final schedule yet: would have vanished. */
+ BUG_ON(p->state == TASK_DEAD);
+
+ get_task_struct(p);
+
+ /*
+ * Drop lock around migration; if someone else moves it,
+- * that's OK. No task can be added to this CPU, so iteration is
++ * that's OK. No task can be added to this CPU, so iteration is
+ * fine.
+- * NOTE: interrupts should be left disabled --dev@
+ */
+- spin_unlock(&rq->lock);
++ spin_unlock_irq(&rq->lock);
+ move_task_off_dead_cpu(dead_cpu, p);
+- spin_lock(&rq->lock);
++ spin_lock_irq(&rq->lock);
+
+ put_task_struct(p);
+ }
+
+ /* release_task() removes task from tasklist, so we won't find dead tasks. */
+@@ -5265,34 +5398,52 @@ static void migrate_dead_tasks(unsigned
+ static struct ctl_table sd_ctl_dir[] = {
+ {
+ .procname = "sched_domain",
+ .mode = 0555,
+ },
+- {0,},
++ {0, },
+ };
+
+ static struct ctl_table sd_ctl_root[] = {
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .mode = 0555,
+ .child = sd_ctl_dir,
+ },
+- {0,},
++ {0, },
+ };
+
+ static struct ctl_table *sd_alloc_ctl_entry(int n)
+ {
+ struct ctl_table *entry =
+- kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
+-
+- BUG_ON(!entry);
+- memset(entry, 0, n * sizeof(struct ctl_table));
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+ return entry;
+ }
+
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+ mode_t mode, proc_handler *proc_handler)
+ {
+@@ -5306,10 +5457,13 @@ set_table_entry(struct ctl_table *entry,
+ static struct ctl_table *
+ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ {
+ struct ctl_table *table = sd_alloc_ctl_entry(12);
+
++ if (table == NULL)
++ return NULL;
++
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+@@ -5329,10 +5483,11 @@ sd_alloc_ctl_domain_table(struct sched_d
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[10], "flags", &sd->flags,
+ sizeof(int), 0644, proc_dointvec_minmax);
++ /* &table[11] is terminator */
+
+ return table;
+ }
+
+ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+@@ -5343,10 +5498,12 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ char buf[32];
+
+ for_each_domain(cpu, sd)
+ domain_num++;
+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
+
+ i = 0;
+ for_each_domain(cpu, sd) {
+ snprintf(buf, 32, "domain%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+@@ -5357,28 +5514,48 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ }
+ return table;
+ }
+
+ static struct ctl_table_header *sd_sysctl_header;
+-static void init_sched_domain_sysctl(void)
++static void register_sched_domain_sysctl(void)
+ {
+ int i, cpu_num = num_online_cpus();
+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
++ WARN_ON(sd_ctl_dir[0].child);
+ sd_ctl_dir[0].child = entry;
+
+- for (i = 0; i < cpu_num; i++, entry++) {
++ if (entry == NULL)
++ return;
++
++ for_each_online_cpu(i) {
+ snprintf(buf, 32, "cpu%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+ entry->child = sd_alloc_ctl_cpu_table(i);
++ entry++;
+ }
++
++ WARN_ON(sd_sysctl_header);
+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+ }
++
++/* may be called multiple times per register */
++static void unregister_sched_domain_sysctl(void)
++{
++ if (sd_sysctl_header)
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++ if (sd_ctl_dir[0].child)
++ sd_free_ctl_entry(&sd_ctl_dir[0].child);
++}
+ #else
+-static void init_sched_domain_sysctl(void)
++static void register_sched_domain_sysctl(void)
++{
++}
++static void unregister_sched_domain_sysctl(void)
+ {
+ }
+ #endif
+
+ /*
+@@ -5401,57 +5578,62 @@ migration_call(struct notifier_block *nf
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
+ kthread_bind(p, cpu);
+ /* Must be high prio: stop_machine expects to yield to it. */
+ rq = task_rq_lock(p, &flags);
+ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
+ task_rq_unlock(rq, &flags);
+ cpu_rq(cpu)->migration_thread = p;
+ break;
+
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- /* Strictly unneccessary, as first user will wake it. */
++ /* Strictly unnecessary, as first user will wake it. */
+ wake_up_process(cpu_rq(cpu)->migration_thread);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ if (!cpu_rq(cpu)->migration_thread)
+ break;
+- /* Unbind it from offline cpu so it can run. Fall thru. */
++ /* Unbind it from offline cpu so it can run. Fall thru. */
+ kthread_bind(cpu_rq(cpu)->migration_thread,
+ any_online_cpu(cpu_online_map));
+ kthread_stop(cpu_rq(cpu)->migration_thread);
+ cpu_rq(cpu)->migration_thread = NULL;
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
++ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
+ migrate_live_tasks(cpu);
+ rq = cpu_rq(cpu);
+ kthread_stop(rq->migration_thread);
+ rq->migration_thread = NULL;
+ /* Idle task back to normal (off runqueue, low prio) */
+- rq = task_rq_lock(rq->idle, &flags);
++ spin_lock_irq(&rq->lock);
+ update_rq_clock(rq);
+ deactivate_task(rq, rq->idle, 0);
+ rq->idle->static_prio = MAX_PRIO;
+ __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
+ rq->idle->sched_class = &idle_sched_class;
+ migrate_dead_tasks(cpu);
+- task_rq_unlock(rq, &flags);
++ spin_unlock_irq(&rq->lock);
++ cpuset_unlock();
+ migrate_nr_uninterruptible(rq);
+ BUG_ON(rq->nr_running != 0);
+
+- /* No need to migrate the tasks: it was best-effort if
+- * they didn't take sched_hotcpu_mutex. Just wake up
+- * the requestors. */
++ /*
++ * No need to migrate the tasks: it was best-effort if
++ * they didn't take sched_hotcpu_mutex. Just wake up
++ * the requestors.
++ */
+ spin_lock_irq(&rq->lock);
+ while (!list_empty(&rq->migration_queue)) {
+ struct migration_req *req;
+
+ req = list_entry(rq->migration_queue.next,
+@@ -5475,125 +5657,125 @@ migration_call(struct notifier_block *nf
+ static struct notifier_block __cpuinitdata migration_notifier = {
+ .notifier_call = migration_call,
+ .priority = 10
+ };
+
+-int __init migration_init(void)
++void __init migration_init(void)
+ {
+ void *cpu = (void *)(long)smp_processor_id();
+ int err;
+
+ /* Start one for the boot CPU: */
+ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+ BUG_ON(err == NOTIFY_BAD);
+ migration_call(&migration_notifier, CPU_ONLINE, cpu);
+ register_cpu_notifier(&migration_notifier);
+-
+- return 0;
+ }
+ #endif
+
+ #ifdef CONFIG_SMP
+
+ /* Number of possible processor ids */
+ int nr_cpu_ids __read_mostly = NR_CPUS;
+ EXPORT_SYMBOL(nr_cpu_ids);
+
+-#undef SCHED_DOMAIN_DEBUG
+-#ifdef SCHED_DOMAIN_DEBUG
+-static void sched_domain_debug(struct sched_domain *sd, int cpu)
+-{
+- int level = 0;
++#ifdef CONFIG_SCHED_DEBUG
+
+- if (!sd) {
+- printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+- return;
++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
++{
++ struct sched_group *group = sd->groups;
++ cpumask_t groupmask;
++ char str[NR_CPUS];
++
++ cpumask_scnprintf(str, NR_CPUS, sd->span);
++ cpus_clear(groupmask);
++
++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
++
++ if (!(sd->flags & SD_LOAD_BALANCE)) {
++ printk("does not load-balance\n");
++ if (sd->parent)
++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
++ " has parent");
++ return -1;
+ }
+
+- printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
++ printk(KERN_CONT "span %s\n", str);
+
++ if (!cpu_isset(cpu, sd->span)) {
++ printk(KERN_ERR "ERROR: domain->span does not contain "
++ "CPU%d\n", cpu);
++ }
++ if (!cpu_isset(cpu, group->cpumask)) {
++ printk(KERN_ERR "ERROR: domain->groups does not contain"
++ " CPU%d\n", cpu);
++ }
++
++ printk(KERN_DEBUG "%*s groups:", level + 1, "");
+ do {
+- int i;
+- char str[NR_CPUS];
+- struct sched_group *group = sd->groups;
+- cpumask_t groupmask;
+-
+- cpumask_scnprintf(str, NR_CPUS, sd->span);
+- cpus_clear(groupmask);
+-
+- printk(KERN_DEBUG);
+- for (i = 0; i < level + 1; i++)
+- printk(" ");
+- printk("domain %d: ", level);
+-
+- if (!(sd->flags & SD_LOAD_BALANCE)) {
+- printk("does not load-balance\n");
+- if (sd->parent)
+- printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+- " has parent");
++ if (!group) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: group is NULL\n");
+ break;
+ }
+
+- printk("span %s\n", str);
++ if (!group->__cpu_power) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: domain->cpu_power not "
++ "set\n");
++ break;
++ }
+
+- if (!cpu_isset(cpu, sd->span))
+- printk(KERN_ERR "ERROR: domain->span does not contain "
+- "CPU%d\n", cpu);
+- if (!cpu_isset(cpu, group->cpumask))
+- printk(KERN_ERR "ERROR: domain->groups does not contain"
+- " CPU%d\n", cpu);
+-
+- printk(KERN_DEBUG);
+- for (i = 0; i < level + 2; i++)
+- printk(" ");
+- printk("groups:");
+- do {
+- if (!group) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: group is NULL\n");
+- break;
+- }
++ if (!cpus_weight(group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: empty group\n");
++ break;
++ }
+
+- if (!group->__cpu_power) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: domain->cpu_power not "
+- "set\n");
+- }
++ if (cpus_intersects(groupmask, group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: repeated CPUs\n");
++ break;
++ }
+
+- if (!cpus_weight(group->cpumask)) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: empty group\n");
+- }
++ cpus_or(groupmask, groupmask, group->cpumask);
+
+- if (cpus_intersects(groupmask, group->cpumask)) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: repeated CPUs\n");
+- }
++ cpumask_scnprintf(str, NR_CPUS, group->cpumask);
++ printk(KERN_CONT " %s", str);
++
++ group = group->next;
++ } while (group != sd->groups);
++ printk(KERN_CONT "\n");
++
++ if (!cpus_equal(sd->span, groupmask))
++ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
++
++ if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
++ printk(KERN_ERR "ERROR: parent span is not a superset "
++ "of domain->span\n");
++ return 0;
++}
+
+- cpus_or(groupmask, groupmask, group->cpumask);
++static void sched_domain_debug(struct sched_domain *sd, int cpu)
++{
++ int level = 0;
+
+- cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+- printk(" %s", str);
++ if (!sd) {
++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
++ return;
++ }
+
+- group = group->next;
+- } while (group != sd->groups);
+- printk("\n");
+-
+- if (!cpus_equal(sd->span, groupmask))
+- printk(KERN_ERR "ERROR: groups don't span "
+- "domain->span\n");
++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
++ for (;;) {
++ if (sched_domain_debug_one(sd, cpu, level))
++ break;
+ level++;
+ sd = sd->parent;
+ if (!sd)
+- continue;
+-
+- if (!cpus_subset(groupmask, sd->span))
+- printk(KERN_ERR "ERROR: parent span is not a superset "
+- "of domain->span\n");
+-
+- } while (sd);
++ break;
++ }
+ }
+ #else
+ # define sched_domain_debug(sd, cpu) do { } while (0)
+ #endif
+
+@@ -5698,11 +5880,11 @@ static int __init isolated_cpu_setup(cha
+ if (ints[i] < NR_CPUS)
+ cpu_set(ints[i], cpu_isolated_map);
+ return 1;
+ }
+
+-__setup ("isolcpus=", isolated_cpu_setup);
++__setup("isolcpus=", isolated_cpu_setup);
+
+ /*
+ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
+ * to a function which identifies what group(along with sched group) a CPU
+ * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
+@@ -5755,11 +5937,11 @@ init_sched_build_groups(cpumask_t span,
+ /**
+ * find_next_best_node - find the next node to include in a sched_domain
+ * @node: node whose sched_domain we're building
+ * @used_nodes: nodes already in the sched_domain
+ *
+- * Find the next node to include in a given scheduling domain. Simply
++ * Find the next node to include in a given scheduling domain. Simply
+ * finds the closest node not already in the @used_nodes map.
+ *
+ * Should use nodemask_t.
+ */
+ static int find_next_best_node(int node, unsigned long *used_nodes)
+@@ -5795,11 +5977,11 @@ static int find_next_best_node(int node,
+ /**
+ * sched_domain_node_span - get a cpumask for a node's sched_domain
+ * @node: node whose cpumask we're constructing
+ * @size: number of nodes to include in this span
+ *
+- * Given a node, construct a good cpumask for its sched_domain to span. It
++ * Given a node, construct a good cpumask for its sched_domain to span. It
+ * should be one that prevents unnecessary balancing, but also spreads tasks
+ * out optimally.
+ */
+ static cpumask_t sched_domain_node_span(int node)
+ {
+@@ -5832,12 +6014,12 @@ int sched_smt_power_savings = 0, sched_m
+ */
+ #ifdef CONFIG_SCHED_SMT
+ static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
+
+-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ if (sg)
+ *sg = &per_cpu(sched_group_cpus, cpu);
+ return cpu;
+ }
+@@ -5850,44 +6032,44 @@ static int cpu_to_cpu_group(int cpu, con
+ static DEFINE_PER_CPU(struct sched_domain, core_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
+ #endif
+
+ #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ int group;
+- cpumask_t mask = cpu_sibling_map[cpu];
++ cpumask_t mask = cpu_sibling_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ if (sg)
+ *sg = &per_cpu(sched_group_core, group);
+ return group;
+ }
+ #elif defined(CONFIG_SCHED_MC)
+-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ if (sg)
+ *sg = &per_cpu(sched_group_core, cpu);
+ return cpu;
+ }
+ #endif
+
+ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
+
+-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ int group;
+ #ifdef CONFIG_SCHED_MC
+ cpumask_t mask = cpu_coregroup_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ #elif defined(CONFIG_SCHED_SMT)
+- cpumask_t mask = cpu_sibling_map[cpu];
++ cpumask_t mask = cpu_sibling_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ #else
+ group = cpu;
+ #endif
+@@ -5927,28 +6109,27 @@ static void init_numa_sched_groups_power
+ struct sched_group *sg = group_head;
+ int j;
+
+ if (!sg)
+ return;
+-next_sg:
+- for_each_cpu_mask(j, sg->cpumask) {
+- struct sched_domain *sd;
++ do {
++ for_each_cpu_mask(j, sg->cpumask) {
++ struct sched_domain *sd;
+
+- sd = &per_cpu(phys_domains, j);
+- if (j != first_cpu(sd->groups->cpumask)) {
+- /*
+- * Only add "power" once for each
+- * physical package.
+- */
+- continue;
+- }
++ sd = &per_cpu(phys_domains, j);
++ if (j != first_cpu(sd->groups->cpumask)) {
++ /*
++ * Only add "power" once for each
++ * physical package.
++ */
++ continue;
++ }
+
+- sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+- }
+- sg = sg->next;
+- if (sg != group_head)
+- goto next_sg;
++ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
++ }
++ sg = sg->next;
++ } while (sg != group_head);
+ }
+ #endif
+
+ #ifdef CONFIG_NUMA
+ /* Free memory allocated for various sched_group structures */
+@@ -6055,12 +6236,12 @@ static int build_sched_domains(const cpu
+ int sd_allnodes = 0;
+
+ /*
+ * Allocate the per-node list of sched groups
+ */
+- sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
+- GFP_KERNEL);
++ sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
++ GFP_KERNEL);
+ if (!sched_group_nodes) {
+ printk(KERN_WARNING "Can not alloc sched group node list\n");
+ return -ENOMEM;
+ }
+ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+@@ -6118,22 +6299,22 @@ static int build_sched_domains(const cpu
+
+ #ifdef CONFIG_SCHED_SMT
+ p = sd;
+ sd = &per_cpu(cpu_domains, i);
+ *sd = SD_SIBLING_INIT;
+- sd->span = cpu_sibling_map[i];
++ sd->span = cpu_sibling_map(i);
+ cpus_and(sd->span, sd->span, *cpu_map);
+ sd->parent = p;
+ p->child = sd;
+ cpu_to_cpu_group(i, cpu_map, &sd->groups);
+ #endif
+ }
+
+ #ifdef CONFIG_SCHED_SMT
+ /* Set up CPU (sibling) groups */
+ for_each_cpu_mask(i, *cpu_map) {
+- cpumask_t this_sibling_map = cpu_sibling_map[i];
++ cpumask_t this_sibling_map = cpu_sibling_map(i);
+ cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
+ if (i != first_cpu(this_sibling_map))
+ continue;
+
+ init_sched_build_groups(this_sibling_map, cpu_map,
+@@ -6291,26 +6472,37 @@ static int build_sched_domains(const cpu
+ error:
+ free_sched_groups(cpu_map);
+ return -ENOMEM;
+ #endif
+ }
++
++static cpumask_t *doms_cur; /* current sched domains */
++static int ndoms_cur; /* number of sched domains in 'doms_cur' */
++
++/*
++ * Special case: If a kmalloc of a doms_cur partition (array of
++ * cpumask_t) fails, then fallback to a single sched domain,
++ * as determined by the single cpumask_t fallback_doms.
++ */
++static cpumask_t fallback_doms;
++
+ /*
+- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * For now this just excludes isolated cpus, but could be used to
++ * exclude other special cases in the future.
+ */
+ static int arch_init_sched_domains(const cpumask_t *cpu_map)
+ {
+- cpumask_t cpu_default_map;
+ int err;
+
+- /*
+- * Setup mask for cpus without special case scheduling requirements.
+- * For now this just excludes isolated cpus, but could be used to
+- * exclude other special cases in the future.
+- */
+- cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+-
+- err = build_sched_domains(&cpu_default_map);
++ ndoms_cur = 1;
++ doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
++ if (!doms_cur)
++ doms_cur = &fallback_doms;
++ cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
++ err = build_sched_domains(doms_cur);
++ register_sched_domain_sysctl();
+
+ return err;
+ }
+
+ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+@@ -6324,41 +6516,83 @@ static void arch_destroy_sched_domains(c
+ */
+ static void detach_destroy_domains(const cpumask_t *cpu_map)
+ {
+ int i;
+
++ unregister_sched_domain_sysctl();
++
+ for_each_cpu_mask(i, *cpu_map)
+ cpu_attach_domain(NULL, i);
+ synchronize_sched();
+ arch_destroy_sched_domains(cpu_map);
+ }
+
+ /*
+- * Partition sched domains as specified by the cpumasks below.
+- * This attaches all cpus from the cpumasks to the NULL domain,
+- * waits for a RCU quiescent period, recalculates sched
+- * domain information and then attaches them back to the
+- * correct sched domains
++ * Partition sched domains as specified by the 'ndoms_new'
++ * cpumasks in the array doms_new[] of cpumasks. This compares
++ * doms_new[] to the current sched domain partitioning, doms_cur[].
++ * It destroys each deleted domain and builds each new domain.
++ *
++ * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
++ * The masks don't intersect (don't overlap.) We should setup one
++ * sched domain for each mask. CPUs not in any of the cpumasks will
++ * not be load balanced. If the same cpumask appears both in the
++ * current 'doms_cur' domains and in the new 'doms_new', we can leave
++ * it as it is.
++ *
++ * The passed in 'doms_new' should be kmalloc'd. This routine takes
++ * ownership of it and will kfree it when done with it. If the caller
++ * failed the kmalloc call, then it can pass in doms_new == NULL,
++ * and partition_sched_domains() will fallback to the single partition
++ * 'fallback_doms'.
++ *
+ * Call with hotplug lock held
+ */
+-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
++void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
+ {
+- cpumask_t change_map;
+- int err = 0;
++ int i, j;
+
+- cpus_and(*partition1, *partition1, cpu_online_map);
+- cpus_and(*partition2, *partition2, cpu_online_map);
+- cpus_or(change_map, *partition1, *partition2);
+-
+- /* Detach sched domains from all of the affected cpus */
+- detach_destroy_domains(&change_map);
+- if (!cpus_empty(*partition1))
+- err = build_sched_domains(partition1);
+- if (!err && !cpus_empty(*partition2))
+- err = build_sched_domains(partition2);
++ /* always unregister in case we don't destroy any domains */
++ unregister_sched_domain_sysctl();
+
+- return err;
++ if (doms_new == NULL) {
++ ndoms_new = 1;
++ doms_new = &fallback_doms;
++ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
++ }
++
++ /* Destroy deleted domains */
++ for (i = 0; i < ndoms_cur; i++) {
++ for (j = 0; j < ndoms_new; j++) {
++ if (cpus_equal(doms_cur[i], doms_new[j]))
++ goto match1;
++ }
++ /* no match - a current sched domain not in new doms_new[] */
++ detach_destroy_domains(doms_cur + i);
++match1:
++ ;
++ }
++
++ /* Build new domains */
++ for (i = 0; i < ndoms_new; i++) {
++ for (j = 0; j < ndoms_cur; j++) {
++ if (cpus_equal(doms_new[i], doms_cur[j]))
++ goto match2;
++ }
++ /* no match - add a new doms_new */
++ build_sched_domains(doms_new + i);
++match2:
++ ;
++ }
++
++ /* Remember the new sched domains */
++ if (doms_cur != &fallback_doms)
++ kfree(doms_cur);
++ doms_cur = doms_new;
++ ndoms_cur = ndoms_new;
++
++ register_sched_domain_sysctl();
+ }
+
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ static int arch_reinit_sched_domains(void)
+ {
+@@ -6434,11 +6668,11 @@ int sched_create_sysfs_power_savings_ent
+ return err;
+ }
+ #endif
+
+ /*
+- * Force a reinitialization of the sched domains hierarchy. The domains
++ * Force a reinitialization of the sched domains hierarchy. The domains
+ * and groups cannot be updated in place without racing with the balancing
+ * code, so we temporarily attach all running cpus to the NULL domain
+ * which will prevent rebalancing while the sched domains are recalculated.
+ */
+ static int update_sched_domains(struct notifier_block *nfb,
+@@ -6485,12 +6719,10 @@ void __init sched_init_smp(void)
+ cpu_set(smp_processor_id(), non_isolated_cpus);
+ mutex_unlock(&sched_hotcpu_mutex);
+ /* XXX: Theoretical race here - CPU may be hotplugged now */
+ hotcpu_notifier(update_sched_domains, 0);
+
+- init_sched_domain_sysctl();
+-
+ /* Move init over to a non-isolated CPU */
+ if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+ BUG();
+ sched_init_granularity();
+ }
+@@ -6501,40 +6733,29 @@ void __init sched_init_smp(void)
+ }
+ #endif /* CONFIG_SMP */
+
+ int in_sched_functions(unsigned long addr)
+ {
+- /* Linker adds these: start and end of __sched functions */
+- extern char __sched_text_start[], __sched_text_end[];
+-
+ return in_lock_functions(addr) ||
+ (addr >= (unsigned long)__sched_text_start
+ && addr < (unsigned long)__sched_text_end);
+ }
+
+-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
++static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+ {
+ cfs_rq->tasks_timeline = RB_ROOT;
+- cfs_rq->fair_clock = 1;
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ cfs_rq->rq = rq;
+ #endif
++ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+ }
+
+ void __init sched_init(void)
+ {
+- u64 now = sched_clock();
+ int highest_cpu = 0;
+ int i, j;
+
+- /*
+- * Link up the scheduling class hierarchy:
+- */
+- rt_sched_class.next = &fair_sched_class;
+- fair_sched_class.next = &idle_sched_class;
+- idle_sched_class.next = NULL;
+-
+ for_each_possible_cpu(i) {
+ struct rt_prio_array *array;
+ struct rq *rq;
+
+ rq = cpu_rq(i);
+@@ -6543,14 +6764,32 @@ void __init sched_init(void)
+ rq->nr_running = 0;
+ rq->clock = 1;
+ init_cfs_rq(&rq->cfs, rq);
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+- list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
++ {
++ struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
++ struct sched_entity *se =
++ &per_cpu(init_sched_entity, i);
++
++ init_cfs_rq_p[i] = cfs_rq;
++ init_cfs_rq(cfs_rq, rq);
++ cfs_rq->tg = &init_task_group;
++ list_add(&cfs_rq->leaf_cfs_rq_list,
++ &rq->leaf_cfs_rq_list);
++
++ init_sched_entity_p[i] = se;
++ se->cfs_rq = &rq->cfs;
++ se->my_q = cfs_rq;
++ se->load.weight = init_task_group_load;
++ se->load.inv_weight =
++ div64_64(1ULL<<32, init_task_group_load);
++ se->parent = NULL;
++ }
++ init_task_group.shares = init_task_group_load;
++ spin_lock_init(&init_task_group.lock);
+ #endif
+- rq->ls.load_update_last = now;
+- rq->ls.load_update_start = now;
+
+ for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
+ rq->cpu_load[j] = 0;
+ #ifdef CONFIG_SMP
+ rq->sd = NULL;
+@@ -6631,30 +6870,44 @@ void __might_sleep(char *file, int line)
+ }
+ EXPORT_SYMBOL(__might_sleep);
+ #endif
+
+ #ifdef CONFIG_MAGIC_SYSRQ
++static void normalize_task(struct rq *rq, struct task_struct *p)
++{
++ int on_rq;
++ update_rq_clock(rq);
++ on_rq = p->se.on_rq;
++ if (on_rq)
++ deactivate_task(rq, p, 0);
++ __setscheduler(rq, p, SCHED_NORMAL, 0);
++ if (on_rq) {
++ activate_task(rq, p, 0);
++ resched_task(rq->curr);
++ }
++}
++
+ void normalize_rt_tasks(void)
+ {
+ struct task_struct *g, *p;
+ unsigned long flags;
+ struct rq *rq;
+- int on_rq;
+
+ read_lock_irq(&tasklist_lock);
+ do_each_thread(g, p) {
+- p->se.fair_key = 0;
+- p->se.wait_runtime = 0;
++ /*
++ * Only normalize user tasks:
++ */
++ if (!p->mm)
++ continue;
++
+ p->se.exec_start = 0;
+- p->se.wait_start_fair = 0;
+- p->se.sleep_start_fair = 0;
+ #ifdef CONFIG_SCHEDSTATS
+ p->se.wait_start = 0;
+ p->se.sleep_start = 0;
+ p->se.block_start = 0;
+ #endif
+- task_rq(p)->cfs.fair_clock = 0;
+ task_rq(p)->clock = 0;
+
+ if (!rt_task(p)) {
+ /*
+ * Renice negative nice level userspace
+@@ -6665,30 +6918,13 @@ void normalize_rt_tasks(void)
+ continue;
+ }
+
+ spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_rq_lock(p);
+-#ifdef CONFIG_SMP
+- /*
+- * Do not touch the migration thread:
+- */
+- if (p == rq->migration_thread)
+- goto out_unlock;
+-#endif
+
+- update_rq_clock(rq);
+- on_rq = p->se.on_rq;
+- if (on_rq)
+- deactivate_task(rq, p, 0);
+- __setscheduler(rq, p, SCHED_NORMAL, 0);
+- if (on_rq) {
+- activate_task(rq, p, 0);
+- resched_task(rq->curr);
+- }
+-#ifdef CONFIG_SMP
+- out_unlock:
+-#endif
++ normalize_task(rq, p);
++
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+ } while_each_thread(g, p);
+
+ read_unlock_irq(&tasklist_lock);
+@@ -6722,12 +6958,12 @@ struct task_struct *curr_task(int cpu)
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+- * are serviced on a separate stack. It allows the architecture to switch the
+- * notion of the current task on a cpu in a non-blocking manner. This function
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a cpu in a non-blocking manner. This function
+ * must be called with all CPU's synchronized, and interrupts disabled, the
+ * and caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before reenabling interrupts and
+ * re-starting the system.
+ *
+@@ -6737,5 +6973,427 @@ void set_curr_task(int cpu, struct task_
+ {
+ cpu_curr(cpu) = p;
+ }
+
+ #endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(void)
++{
++ struct task_group *tg;
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *se;
++ struct rq *rq;
++ int i;
++
++ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
++ if (!tg->cfs_rq)
++ goto err;
++ tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
++ if (!tg->se)
++ goto err;
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++
++ cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL,
++ cpu_to_node(i));
++ if (!cfs_rq)
++ goto err;
++
++ se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL,
++ cpu_to_node(i));
++ if (!se)
++ goto err;
++
++ memset(cfs_rq, 0, sizeof(struct cfs_rq));
++ memset(se, 0, sizeof(struct sched_entity));
++
++ tg->cfs_rq[i] = cfs_rq;
++ init_cfs_rq(cfs_rq, rq);
++ cfs_rq->tg = tg;
++
++ tg->se[i] = se;
++ se->cfs_rq = &rq->cfs;
++ se->my_q = cfs_rq;
++ se->load.weight = NICE_0_LOAD;
++ se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD);
++ se->parent = NULL;
++ }
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++ cfs_rq = tg->cfs_rq[i];
++ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
++ }
++
++ tg->shares = NICE_0_LOAD;
++ spin_lock_init(&tg->lock);
++
++ return tg;
++
++err:
++ for_each_possible_cpu(i) {
++ if (tg->cfs_rq)
++ kfree(tg->cfs_rq[i]);
++ if (tg->se)
++ kfree(tg->se[i]);
++ }
++ kfree(tg->cfs_rq);
++ kfree(tg->se);
++ kfree(tg);
++
++ return ERR_PTR(-ENOMEM);
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void free_sched_group(struct rcu_head *rhp)
++{
++ struct task_group *tg = container_of(rhp, struct task_group, rcu);
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *se;
++ int i;
++
++ /* now it should be safe to free those cfs_rqs */
++ for_each_possible_cpu(i) {
++ cfs_rq = tg->cfs_rq[i];
++ kfree(cfs_rq);
++
++ se = tg->se[i];
++ kfree(se);
++ }
++
++ kfree(tg->cfs_rq);
++ kfree(tg->se);
++ kfree(tg);
++}
++
++/* Destroy runqueue etc associated with a task group */
++void sched_destroy_group(struct task_group *tg)
++{
++ struct cfs_rq *cfs_rq = NULL;
++ int i;
++
++ for_each_possible_cpu(i) {
++ cfs_rq = tg->cfs_rq[i];
++ list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
++ }
++
++ BUG_ON(!cfs_rq);
++
++ /* wait for possible concurrent references to cfs_rqs complete */
++ call_rcu(&tg->rcu, free_sched_group);
++}
++
++/* change task's runqueue when it moves between groups.
++ * The caller of this function should have put the task in its new group
++ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
++ * reflect its new group.
++ */
++void sched_move_task(struct task_struct *tsk)
++{
++ int on_rq, running;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(tsk, &flags);
++
++ if (tsk->sched_class != &fair_sched_class) {
++ set_task_cfs_rq(tsk, task_cpu(tsk));
++ goto done;
++ }
++
++ update_rq_clock(rq);
++
++ running = task_current(rq, tsk);
++ on_rq = tsk->se.on_rq;
++
++ if (on_rq) {
++ dequeue_task(rq, tsk, 0);
++ if (unlikely(running))
++ tsk->sched_class->put_prev_task(rq, tsk);
++ }
++
++ set_task_cfs_rq(tsk, task_cpu(tsk));
++
++ if (on_rq) {
++ if (unlikely(running))
++ tsk->sched_class->set_curr_task(rq);
++ enqueue_task(rq, tsk, 0);
++ }
++
++done:
++ task_rq_unlock(rq, &flags);
++}
++
++static void set_se_shares(struct sched_entity *se, unsigned long shares)
++{
++ struct cfs_rq *cfs_rq = se->cfs_rq;
++ struct rq *rq = cfs_rq->rq;
++ int on_rq;
++
++ spin_lock_irq(&rq->lock);
++
++ on_rq = se->on_rq;
++ if (on_rq)
++ dequeue_entity(cfs_rq, se, 0);
++
++ se->load.weight = shares;
++ se->load.inv_weight = div64_64((1ULL<<32), shares);
++
++ if (on_rq)
++ enqueue_entity(cfs_rq, se, 0);
++
++ spin_unlock_irq(&rq->lock);
++}
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++ int i;
++
++ spin_lock(&tg->lock);
++ if (tg->shares == shares)
++ goto done;
++
++ tg->shares = shares;
++ for_each_possible_cpu(i)
++ set_se_shares(tg->se[i], shares);
++
++done:
++ spin_unlock(&tg->lock);
++ return 0;
++}
++
++unsigned long sched_group_shares(struct task_group *tg)
++{
++ return tg->shares;
++}
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_FAIR_CGROUP_SCHED
++
++/* return corresponding task_group object of a cgroup */
++static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
++{
++ return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
++ struct task_group, css);
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg;
++
++ if (!cgrp->parent) {
++ /* This is early initialization for the top cgroup */
++ init_task_group.css.cgroup = cgrp;
++ return &init_task_group.css;
++ }
++
++ /* we support only 1-level deep hierarchical scheduler atm */
++ if (cgrp->parent->parent)
++ return ERR_PTR(-EINVAL);
++
++ tg = sched_create_group();
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++
++ /* Bind the cgroup to task_group object we just created */
++ tg->css.cgroup = cgrp;
++
++ return &tg->css;
++}
++
++static void
++cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ sched_destroy_group(tg);
++}
++
++static int
++cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct task_struct *tsk)
++{
++ /* We don't support RT-tasks being in separate groups */
++ if (tsk->sched_class != &fair_sched_class)
++ return -EINVAL;
++
++ return 0;
++}
++
++static void
++cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct cgroup *old_cont, struct task_struct *tsk)
++{
++ sched_move_task(tsk);
++}
++
++static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
++ u64 shareval)
++{
++ return sched_group_set_shares(cgroup_tg(cgrp), shareval);
++}
++
++static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ return (u64) tg->shares;
++}
++
++static struct cftype cpu_files[] = {
++ {
++ .name = "shares",
++ .read_uint = cpu_shares_read_uint,
++ .write_uint = cpu_shares_write_uint,
++ },
++};
++
++static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
++}
++
++struct cgroup_subsys cpu_cgroup_subsys = {
++ .name = "cpu",
++ .create = cpu_cgroup_create,
++ .destroy = cpu_cgroup_destroy,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .populate = cpu_cgroup_populate,
++ .subsys_id = cpu_cgroup_subsys_id,
++ .early_init = 1,
++};
++
++#endif /* CONFIG_FAIR_CGROUP_SCHED */
++
++#ifdef CONFIG_CGROUP_CPUACCT
++
++/*
++ * CPU accounting code for task groups.
++ *
++ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
++ * (balbir@in.ibm.com).
++ */
++
++/* track cpu usage of a group of tasks */
++struct cpuacct {
++ struct cgroup_subsys_state css;
++ /* cpuusage holds pointer to a u64-type object on every cpu */
++ u64 *cpuusage;
++};
++
++struct cgroup_subsys cpuacct_subsys;
++
++/* return cpu accounting group corresponding to this container */
++static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
++{
++ return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* return cpu accounting group to which this task belongs */
++static inline struct cpuacct *task_ca(struct task_struct *tsk)
++{
++ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* create a new cpu accounting group */
++static struct cgroup_subsys_state *cpuacct_create(
++ struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
++
++ if (!ca)
++ return ERR_PTR(-ENOMEM);
++
++ ca->cpuusage = alloc_percpu(u64);
++ if (!ca->cpuusage) {
++ kfree(ca);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return &ca->css;
++}
++
++/* destroy an existing cpu accounting group */
++static void
++cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ struct cpuacct *ca = cgroup_ca(cont);
++
++ free_percpu(ca->cpuusage);
++ kfree(ca);
++}
++
++/* return total cpu usage (in nanoseconds) of a group */
++static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
++{
++ struct cpuacct *ca = cgroup_ca(cont);
++ u64 totalcpuusage = 0;
++ int i;
++
++ for_each_possible_cpu(i) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
++
++ /*
++ * Take rq->lock to make 64-bit addition safe on 32-bit
++ * platforms.
++ */
++ spin_lock_irq(&cpu_rq(i)->lock);
++ totalcpuusage += *cpuusage;
++ spin_unlock_irq(&cpu_rq(i)->lock);
++ }
++
++ return totalcpuusage;
++}
++
++static struct cftype files[] = {
++ {
++ .name = "usage",
++ .read_uint = cpuusage_read,
++ },
++};
++
++static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
++}
++
++/*
++ * charge this task's execution time to its accounting group.
++ *
++ * called with rq->lock held.
++ */
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
++{
++ struct cpuacct *ca;
++
++ if (!cpuacct_subsys.active)
++ return;
++
++ ca = task_ca(tsk);
++ if (ca) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
++
++ *cpuusage += cputime;
++ }
++}
++
++struct cgroup_subsys cpuacct_subsys = {
++ .name = "cpuacct",
++ .create = cpuacct_create,
++ .destroy = cpuacct_destroy,
++ .populate = cpuacct_populate,
++ .subsys_id = cpuacct_subsys_id,
++};
++#endif /* CONFIG_CGROUP_CPUACCT */
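
The cpuacct hunk above follows a familiar kernel pattern: the charge path (already called under the runqueue lock) bumps a per-CPU 64-bit counter, and the read path sums every counter while holding each CPU's runqueue lock so the 64-bit reads and additions are safe on 32-bit platforms, as the comment in cpuusage_read() notes. A minimal user-space sketch of that pattern, in which alloc_percpu()/percpu_ptr() and rq->lock are replaced by a plain array and pthread mutexes (those substitutions are illustrative only and not part of the patch):

/*
 * Toy model of the cpuacct accounting added above: per-CPU counters
 * updated cheaply on the hot path, summed under a lock on the rare
 * read path.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t cpuusage[NR_CPUS];            /* stands in for ca->cpuusage */
static pthread_mutex_t rq_lock[NR_CPUS] = {   /* stands in for cpu_rq(i)->lock */
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* charge path: the caller is assumed to hold this CPU's lock already */
static void charge(int cpu, uint64_t cputime_ns)
{
	cpuusage[cpu] += cputime_ns;
}

/* read path: take each CPU's lock in turn and sum the counters */
static uint64_t usage_read(void)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		pthread_mutex_lock(&rq_lock[i]);
		total += cpuusage[i];
		pthread_mutex_unlock(&rq_lock[i]);
	}
	return total;
}

int main(void)
{
	charge(0, 1500000);	/* 1.5 ms of runtime on CPU 0 */
	charge(1, 500000);	/* 0.5 ms of runtime on CPU 1 */
	printf("total usage: %llu ns\n", (unsigned long long)usage_read());
	return 0;
}

Keeping the hot-path update local to one CPU is what makes it cheap enough to call cpuacct_charge() on every runtime update.
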
+--- linux-2.6.23.orig/kernel/sched_debug.c
++++ linux-2.6.23/kernel/sched_debug.c
+@@ -26,104 +26,125 @@
+ seq_printf(m, x); \
+ else \
+ printk(x); \
+ } while (0)
+
++/*
++ * Ease the printing of nsec fields:
++ */
++static long long nsec_high(long long nsec)
++{
++ if (nsec < 0) {
++ nsec = -nsec;
++ do_div(nsec, 1000000);
++ return -nsec;
++ }
++ do_div(nsec, 1000000);
++
++ return nsec;
++}
++
++static unsigned long nsec_low(long long nsec)
++{
++ if (nsec < 0)
++ nsec = -nsec;
++
++ return do_div(nsec, 1000000);
++}
++
++#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
++
+ static void
+ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ {
+ if (rq->curr == p)
+ SEQ_printf(m, "R");
+ else
+ SEQ_printf(m, " ");
+
+- SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
++ SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
+ p->comm, p->pid,
+- (long long)p->se.fair_key,
+- (long long)(p->se.fair_key - rq->cfs.fair_clock),
+- (long long)p->se.wait_runtime,
++ SPLIT_NS(p->se.vruntime),
+ (long long)(p->nvcsw + p->nivcsw),
+ p->prio);
+ #ifdef CONFIG_SCHEDSTATS
+- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+- (long long)p->se.sum_exec_runtime,
+- (long long)p->se.sum_wait_runtime,
+- (long long)p->se.sum_sleep_runtime,
+- (long long)p->se.wait_runtime_overruns,
+- (long long)p->se.wait_runtime_underruns);
++ SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
++ SPLIT_NS(p->se.vruntime),
++ SPLIT_NS(p->se.sum_exec_runtime),
++ SPLIT_NS(p->se.sum_sleep_runtime));
+ #else
+- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+- 0LL, 0LL, 0LL, 0LL, 0LL);
++ SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
++ 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
+ #endif
+ }
+
+ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+ {
+ struct task_struct *g, *p;
++ unsigned long flags;
+
+ SEQ_printf(m,
+ "\nrunnable tasks:\n"
+- " task PID tree-key delta waiting"
+- " switches prio"
+- " sum-exec sum-wait sum-sleep"
+- " wait-overrun wait-underrun\n"
+- "------------------------------------------------------------------"
+- "----------------"
+- "------------------------------------------------"
+- "--------------------------------\n");
++ " task PID tree-key switches prio"
++ " exec-runtime sum-exec sum-sleep\n"
++ "------------------------------------------------------"
++ "----------------------------------------------------\n");
+
+- read_lock_irq(&tasklist_lock);
++ read_lock_irqsave(&tasklist_lock, flags);
+
+ do_each_thread(g, p) {
+ if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+ continue;
+
+ print_task(m, rq, p);
+ } while_each_thread(g, p);
+
+- read_unlock_irq(&tasklist_lock);
++ read_unlock_irqrestore(&tasklist_lock, flags);
+ }
+
+-static void
+-print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
++void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ {
+- s64 wait_runtime_rq_sum = 0;
+- struct task_struct *p;
+- struct rb_node *curr;
+- unsigned long flags;
++ s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
++ spread, rq0_min_vruntime, spread0;
+ struct rq *rq = &per_cpu(runqueues, cpu);
++ struct sched_entity *last;
++ unsigned long flags;
+
+- spin_lock_irqsave(&rq->lock, flags);
+- curr = first_fair(cfs_rq);
+- while (curr) {
+- p = rb_entry(curr, struct task_struct, se.run_node);
+- wait_runtime_rq_sum += p->se.wait_runtime;
+-
+- curr = rb_next(curr);
+- }
+- spin_unlock_irqrestore(&rq->lock, flags);
+-
+- SEQ_printf(m, " .%-30s: %Ld\n", "wait_runtime_rq_sum",
+- (long long)wait_runtime_rq_sum);
+-}
+-
+-void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+-{
+ SEQ_printf(m, "\ncfs_rq\n");
+
+-#define P(x) \
+- SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
+-
+- P(fair_clock);
+- P(exec_clock);
+- P(wait_runtime);
+- P(wait_runtime_overruns);
+- P(wait_runtime_underruns);
+- P(sleeper_bonus);
+-#undef P
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
++ SPLIT_NS(cfs_rq->exec_clock));
+
+- print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
++ spin_lock_irqsave(&rq->lock, flags);
++ if (cfs_rq->rb_leftmost)
++ MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
++ last = __pick_last_entity(cfs_rq);
++ if (last)
++ max_vruntime = last->vruntime;
++ min_vruntime = rq->cfs.min_vruntime;
++ rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
++ spin_unlock_irqrestore(&rq->lock, flags);
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
++ SPLIT_NS(MIN_vruntime));
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
++ SPLIT_NS(min_vruntime));
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
++ SPLIT_NS(max_vruntime));
++ spread = max_vruntime - MIN_vruntime;
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
++ SPLIT_NS(spread));
++ spread0 = min_vruntime - rq0_min_vruntime;
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
++ SPLIT_NS(spread0));
++ SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
++ SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
++#ifdef CONFIG_SCHEDSTATS
++ SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
++ rq->bkl_count);
++#endif
++ SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
++ cfs_rq->nr_spread_over);
+ }
+
+ static void print_cpu(struct seq_file *m, int cpu)
+ {
+ struct rq *rq = &per_cpu(runqueues, cpu);
+@@ -139,35 +160,36 @@ static void print_cpu(struct seq_file *m
+ SEQ_printf(m, "\ncpu#%d\n", cpu);
+ #endif
+
+ #define P(x) \
+ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))
++#define PN(x) \
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
+
+ P(nr_running);
+ SEQ_printf(m, " .%-30s: %lu\n", "load",
+- rq->ls.load.weight);
+- P(ls.delta_fair);
+- P(ls.delta_exec);
++ rq->load.weight);
+ P(nr_switches);
+ P(nr_load_updates);
+ P(nr_uninterruptible);
+ SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
+- P(next_balance);
++ PN(next_balance);
+ P(curr->pid);
+- P(clock);
+- P(idle_clock);
+- P(prev_clock_raw);
++ PN(clock);
++ PN(idle_clock);
++ PN(prev_clock_raw);
+ P(clock_warps);
+ P(clock_overflows);
+ P(clock_deep_idle_events);
+- P(clock_max_delta);
++ PN(clock_max_delta);
+ P(cpu_load[0]);
+ P(cpu_load[1]);
+ P(cpu_load[2]);
+ P(cpu_load[3]);
+ P(cpu_load[4]);
+ #undef P
++#undef PN
+
+ print_cfs_stats(m, cpu);
+
+ print_rq(m, rq, cpu);
+ }
+@@ -175,16 +197,29 @@ static void print_cpu(struct seq_file *m
+ static int sched_debug_show(struct seq_file *m, void *v)
+ {
+ u64 now = ktime_to_ns(ktime_get());
+ int cpu;
+
+- SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
++ SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+- SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
++ SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));
++
++#define P(x) \
++ SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
++#define PN(x) \
++ SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
++ PN(sysctl_sched_latency);
++ PN(sysctl_sched_min_granularity);
++ PN(sysctl_sched_wakeup_granularity);
++ PN(sysctl_sched_batch_wakeup_granularity);
++ PN(sysctl_sched_child_runs_first);
++ P(sysctl_sched_features);
++#undef PN
++#undef P
+
+ for_each_online_cpu(cpu)
+ print_cpu(m, cpu);
+
+ SEQ_printf(m, "\n");
+@@ -200,11 +235,11 @@ static void sysrq_sched_debug_show(void)
+ static int sched_debug_open(struct inode *inode, struct file *filp)
+ {
+ return single_open(filp, sched_debug_show, NULL);
+ }
+
+-static struct file_operations sched_debug_fops = {
++static const struct file_operations sched_debug_fops = {
+ .open = sched_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+@@ -224,10 +259,11 @@ static int __init init_sched_debug_procf
+
+ __initcall(init_sched_debug_procfs);
+
+ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ {
++ unsigned long nr_switches;
+ unsigned long flags;
+ int num_threads = 1;
+
+ rcu_read_lock();
+ if (lock_task_sighand(p, &flags)) {
+@@ -235,53 +271,126 @@ void proc_sched_show_task(struct task_st
+ unlock_task_sighand(p, &flags);
+ }
+ rcu_read_unlock();
+
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+- SEQ_printf(m, "----------------------------------------------\n");
++ SEQ_printf(m,
++ "---------------------------------------------------------\n");
++#define __P(F) \
++ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
+ #define P(F) \
+- SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
++ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
++#define __PN(F) \
++ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
++#define PN(F) \
++ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
++
++ PN(se.exec_start);
++ PN(se.vruntime);
++ PN(se.sum_exec_runtime);
+
+- P(se.wait_runtime);
+- P(se.wait_start_fair);
+- P(se.exec_start);
+- P(se.sleep_start_fair);
+- P(se.sum_exec_runtime);
++ nr_switches = p->nvcsw + p->nivcsw;
+
+ #ifdef CONFIG_SCHEDSTATS
+- P(se.wait_start);
+- P(se.sleep_start);
+- P(se.block_start);
+- P(se.sleep_max);
+- P(se.block_max);
+- P(se.exec_max);
+- P(se.wait_max);
+- P(se.wait_runtime_overruns);
+- P(se.wait_runtime_underruns);
+- P(se.sum_wait_runtime);
++ PN(se.wait_start);
++ PN(se.sleep_start);
++ PN(se.block_start);
++ PN(se.sleep_max);
++ PN(se.block_max);
++ PN(se.exec_max);
++ PN(se.slice_max);
++ PN(se.wait_max);
++ P(sched_info.bkl_count);
++ P(se.nr_migrations);
++ P(se.nr_migrations_cold);
++ P(se.nr_failed_migrations_affine);
++ P(se.nr_failed_migrations_running);
++ P(se.nr_failed_migrations_hot);
++ P(se.nr_forced_migrations);
++ P(se.nr_forced2_migrations);
++ P(se.nr_wakeups);
++ P(se.nr_wakeups_sync);
++ P(se.nr_wakeups_migrate);
++ P(se.nr_wakeups_local);
++ P(se.nr_wakeups_remote);
++ P(se.nr_wakeups_affine);
++ P(se.nr_wakeups_affine_attempts);
++ P(se.nr_wakeups_passive);
++ P(se.nr_wakeups_idle);
++
++ {
++ u64 avg_atom, avg_per_cpu;
++
++ avg_atom = p->se.sum_exec_runtime;
++ if (nr_switches)
++ do_div(avg_atom, nr_switches);
++ else
++ avg_atom = -1LL;
++
++ avg_per_cpu = p->se.sum_exec_runtime;
++ if (p->se.nr_migrations) {
++ avg_per_cpu = div64_64(avg_per_cpu,
++ p->se.nr_migrations);
++ } else {
++ avg_per_cpu = -1LL;
++ }
++
++ __PN(avg_atom);
++ __PN(avg_per_cpu);
++ }
+ #endif
+- SEQ_printf(m, "%-25s:%20Ld\n",
+- "nr_switches", (long long)(p->nvcsw + p->nivcsw));
++ __P(nr_switches);
++ SEQ_printf(m, "%-35s:%21Ld\n",
++ "nr_voluntary_switches", (long long)p->nvcsw);
++ SEQ_printf(m, "%-35s:%21Ld\n",
++ "nr_involuntary_switches", (long long)p->nivcsw);
++
+ P(se.load.weight);
+ P(policy);
+ P(prio);
++#undef PN
++#undef __PN
+ #undef P
++#undef __P
+
+ {
+ u64 t0, t1;
+
+ t0 = sched_clock();
+ t1 = sched_clock();
+- SEQ_printf(m, "%-25s:%20Ld\n",
++ SEQ_printf(m, "%-35s:%21Ld\n",
+ "clock-delta", (long long)(t1-t0));
+ }
+ }
+
+ void proc_sched_set_task(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHEDSTATS
+- p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
+- p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
++ p->se.wait_max = 0;
++ p->se.sleep_max = 0;
++ p->se.sum_sleep_runtime = 0;
++ p->se.block_max = 0;
++ p->se.exec_max = 0;
++ p->se.slice_max = 0;
++ p->se.nr_migrations = 0;
++ p->se.nr_migrations_cold = 0;
++ p->se.nr_failed_migrations_affine = 0;
++ p->se.nr_failed_migrations_running = 0;
++ p->se.nr_failed_migrations_hot = 0;
++ p->se.nr_forced_migrations = 0;
++ p->se.nr_forced2_migrations = 0;
++ p->se.nr_wakeups = 0;
++ p->se.nr_wakeups_sync = 0;
++ p->se.nr_wakeups_migrate = 0;
++ p->se.nr_wakeups_local = 0;
++ p->se.nr_wakeups_remote = 0;
++ p->se.nr_wakeups_affine = 0;
++ p->se.nr_wakeups_affine_attempts = 0;
++ p->se.nr_wakeups_passive = 0;
++ p->se.nr_wakeups_idle = 0;
++ p->sched_info.bkl_count = 0;
+ #endif
+- p->se.sum_exec_runtime = 0;
+- p->se.prev_sum_exec_runtime = 0;
++ p->se.sum_exec_runtime = 0;
++ p->se.prev_sum_exec_runtime = 0;
++ p->nvcsw = 0;
++ p->nivcsw = 0;
+ }
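
Most of the sched_debug.c churn above is the switch from raw nanosecond dumps to the SPLIT_NS() helpers, which print a signed nanosecond value as milliseconds with a six-digit fractional part (the explicit sign handling is there because do_div() operates on an unsigned 64-bit dividend). A stand-alone sketch of the same formatting, with do_div() replaced by plain / and % so it runs in user space:

#include <stdio.h>

/* split a signed nanosecond count into msec and 6-digit remainder parts */
static long long nsec_high(long long nsec)
{
	if (nsec < 0)
		return -((-nsec) / 1000000);
	return nsec / 1000000;
}

static unsigned long nsec_low(long long nsec)
{
	if (nsec < 0)
		nsec = -nsec;
	return (unsigned long)(nsec % 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

int main(void)
{
	long long now = 1234567890123LL;	/* about 1234.6 seconds, in ns */

	printf("now at %lld.%06lu msecs\n", SPLIT_NS(now));
	return 0;
}

This prints "now at 1234567.890123 msecs"; the %06 padding matters, since a remainder of 1500 ns must show up as .001500 rather than .1500.
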
+--- linux-2.6.23.orig/kernel/sched_fair.c
++++ linux-2.6.23/kernel/sched_fair.c
+@@ -20,29 +20,38 @@
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+ /*
+ * Targeted preemption latency for CPU-bound tasks:
+- * (default: 20ms, units: nanoseconds)
++ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * NOTE: this latency value is not the same as the concept of
+- * 'timeslice length' - timeslices in CFS are of variable length.
+- * (to see the precise effective timeslice length of your workload,
+- * run vmstat and monitor the context-switches field)
++ * 'timeslice length' - timeslices in CFS are of variable length
++ * and have no persistent notion like in traditional, time-slice
++ * based scheduling concepts.
+ *
+- * On SMP systems the value of this is multiplied by the log2 of the
+- * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
+- * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
+- * Targeted preemption latency for CPU-bound tasks:
++ * (to see the precise effective timeslice length of your workload,
++ * run vmstat and monitor the context-switches (cs) field)
+ */
+-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
++unsigned int sysctl_sched_latency = 20000000ULL;
+
+ /*
+ * Minimal preemption granularity for CPU-bound tasks:
+- * (default: 2 msec, units: nanoseconds)
++ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ */
++unsigned int sysctl_sched_min_granularity = 4000000ULL;
++
++/*
++ * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
+ */
+-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
++static unsigned int sched_nr_latency = 5;
++
++/*
++ * After fork, child runs first. (default) If set to 0 then
++ * parent will (try to) run first.
++ */
++const_debug unsigned int sysctl_sched_child_runs_first = 1;
+
+ /*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the agressive yield implementation of the
+@@ -50,56 +59,29 @@ unsigned int sysctl_sched_min_granularit
+ */
+ unsigned int __read_mostly sysctl_sched_compat_yield;
+
+ /*
+ * SCHED_BATCH wake-up granularity.
+- * (default: 25 msec, units: nanoseconds)
++ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
++unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
+
+ /*
+ * SCHED_OTHER wake-up granularity.
+- * (default: 1 msec, units: nanoseconds)
++ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+-
+-unsigned int sysctl_sched_stat_granularity __read_mostly;
+-
+-/*
+- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
+- */
+-unsigned int sysctl_sched_runtime_limit __read_mostly;
+-
+-/*
+- * Debugging: various feature bits
+- */
+-enum {
+- SCHED_FEAT_FAIR_SLEEPERS = 1,
+- SCHED_FEAT_SLEEPER_AVG = 2,
+- SCHED_FEAT_SLEEPER_LOAD_AVG = 4,
+- SCHED_FEAT_PRECISE_CPU_LOAD = 8,
+- SCHED_FEAT_START_DEBIT = 16,
+- SCHED_FEAT_SKIP_INITIAL = 32,
+-};
+-
+-unsigned int sysctl_sched_features __read_mostly =
+- SCHED_FEAT_FAIR_SLEEPERS *1 |
+- SCHED_FEAT_SLEEPER_AVG *0 |
+- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
+- SCHED_FEAT_PRECISE_CPU_LOAD *0 |
+- SCHED_FEAT_START_DEBIT *1 |
+- SCHED_FEAT_SKIP_INITIAL *0;
++unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+
+-extern struct sched_class fair_sched_class;
++const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
+ /**************************************************************
+ * CFS operations on generic schedulable entities:
+ */
+
+@@ -109,47 +91,22 @@ extern struct sched_class fair_sched_cla
+ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+ {
+ return cfs_rq->rq;
+ }
+
+-/* currently running entity (if any) on this cfs_rq */
+-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+-{
+- return cfs_rq->curr;
+-}
+-
+ /* An entity is a task if it doesn't "own" a runqueue */
+ #define entity_is_task(se) (!se->my_q)
+
+-static inline void
+-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- cfs_rq->curr = se;
+-}
+-
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+
+ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+ {
+ return container_of(cfs_rq, struct rq, cfs);
+ }
+
+-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+-{
+- struct rq *rq = rq_of(cfs_rq);
+-
+- if (unlikely(rq->curr->sched_class != &fair_sched_class))
+- return NULL;
+-
+- return &rq->curr->se;
+-}
+-
+ #define entity_is_task(se) 1
+
+-static inline void
+-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+-
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ static inline struct task_struct *task_of(struct sched_entity *se)
+ {
+ return container_of(se, struct task_struct, se);
+@@ -158,20 +115,42 @@ static inline struct task_struct *task_o
+
+ /**************************************************************
+ * Scheduling class tree data structure manipulation methods:
+ */
+
++static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
++{
++ s64 delta = (s64)(vruntime - min_vruntime);
++ if (delta > 0)
++ min_vruntime = vruntime;
++
++ return min_vruntime;
++}
++
++static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
++{
++ s64 delta = (s64)(vruntime - min_vruntime);
++ if (delta < 0)
++ min_vruntime = vruntime;
++
++ return min_vruntime;
++}
++
++static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++ return se->vruntime - cfs_rq->min_vruntime;
++}
++
+ /*
+ * Enqueue an entity into the rb-tree:
+ */
+-static inline void
+-__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct rb_node *parent = NULL;
+ struct sched_entity *entry;
+- s64 key = se->fair_key;
++ s64 key = entity_key(cfs_rq, se);
+ int leftmost = 1;
+
+ /*
+ * Find the right place in the rbtree:
+ */
+@@ -180,11 +159,11 @@ __enqueue_entity(struct cfs_rq *cfs_rq,
+ entry = rb_entry(parent, struct sched_entity, run_node);
+ /*
+ * We dont care about collisions. Nodes with
+ * the same key stay together.
+ */
+- if (key - entry->fair_key < 0) {
++ if (key < entity_key(cfs_rq, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+@@ -197,28 +176,18 @@ __enqueue_entity(struct cfs_rq *cfs_rq,
+ if (leftmost)
+ cfs_rq->rb_leftmost = &se->run_node;
+
+ rb_link_node(&se->run_node, parent, link);
+ rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+- update_load_add(&cfs_rq->load, se->load.weight);
+- cfs_rq->nr_running++;
+- se->on_rq = 1;
+-
+- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+ }
+
+-static inline void
+-__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ if (cfs_rq->rb_leftmost == &se->run_node)
+ cfs_rq->rb_leftmost = rb_next(&se->run_node);
+- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+- update_load_sub(&cfs_rq->load, se->load.weight);
+- cfs_rq->nr_running--;
+- se->on_rq = 0;
+
+- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
++ rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+ }
+
+ static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
+ {
+ return cfs_rq->rb_leftmost;
+@@ -227,308 +196,206 @@ static inline struct rb_node *first_fair
+ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+ {
+ return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+ }
+
++static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
++{
++ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
++ struct sched_entity *se = NULL;
++ struct rb_node *parent;
++
++ while (*link) {
++ parent = *link;
++ se = rb_entry(parent, struct sched_entity, run_node);
++ link = &parent->rb_right;
++ }
++
++ return se;
++}
++
+ /**************************************************************
+ * Scheduling class statistics methods:
+ */
+
++#ifdef CONFIG_SCHED_DEBUG
++int sched_nr_latency_handler(struct ctl_table *table, int write,
++ struct file *filp, void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
++
++ if (ret || !write)
++ return ret;
++
++ sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
++ sysctl_sched_min_granularity);
++
++ return 0;
++}
++#endif
++
+ /*
+- * Calculate the preemption granularity needed to schedule every
+- * runnable task once per sysctl_sched_latency amount of time.
+- * (down to a sensible low limit on granularity)
+- *
+- * For example, if there are 2 tasks running and latency is 10 msecs,
+- * we switch tasks every 5 msecs. If we have 3 tasks running, we have
+- * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
+- * for each task. We do finer and finer scheduling up to until we
+- * reach the minimum granularity value.
+- *
+- * To achieve this we use the following dynamic-granularity rule:
+- *
+- * gran = lat/nr - lat/nr/nr
++ * The idea is to set a period in which each task runs once.
+ *
+- * This comes out of the following equations:
++ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
++ * this period because otherwise the slices get too small.
+ *
+- * kA1 + gran = kB1
+- * kB2 + gran = kA2
+- * kA2 = kA1
+- * kB2 = kB1 - d + d/nr
+- * lat = d * nr
+- *
+- * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
+- * '1' is start of time, '2' is end of time, 'd' is delay between
+- * 1 and 2 (during which task B was running), 'nr' is number of tasks
+- * running, 'lat' is the the period of each task. ('lat' is the
+- * sched_latency that we aim for.)
++ * p = (nr <= nl) ? l : l*nr/nl
+ */
+-static long
+-sched_granularity(struct cfs_rq *cfs_rq)
++static u64 __sched_period(unsigned long nr_running)
+ {
+- unsigned int gran = sysctl_sched_latency;
+- unsigned int nr = cfs_rq->nr_running;
++ u64 period = sysctl_sched_latency;
++ unsigned long nr_latency = sched_nr_latency;
+
+- if (nr > 1) {
+- gran = gran/nr - gran/nr/nr;
+- gran = max(gran, sysctl_sched_min_granularity);
++ if (unlikely(nr_running > nr_latency)) {
++ period *= nr_running;
++ do_div(period, nr_latency);
+ }
+
+- return gran;
++ return period;
+ }
+
+ /*
+- * We rescale the rescheduling granularity of tasks according to their
+- * nice level, but only linearly, not exponentially:
++ * We calculate the wall-time slice from the period by taking a part
++ * proportional to the weight.
++ *
++ * s = p*w/rw
+ */
+-static long
+-niced_granularity(struct sched_entity *curr, unsigned long granularity)
++static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- u64 tmp;
++ u64 slice = __sched_period(cfs_rq->nr_running);
+
+- if (likely(curr->load.weight == NICE_0_LOAD))
+- return granularity;
+- /*
+- * Positive nice levels get the same granularity as nice-0:
+- */
+- if (likely(curr->load.weight < NICE_0_LOAD)) {
+- tmp = curr->load.weight * (u64)granularity;
+- return (long) (tmp >> NICE_0_SHIFT);
+- }
+- /*
+- * Negative nice level tasks get linearly finer
+- * granularity:
+- */
+- tmp = curr->load.inv_weight * (u64)granularity;
++ slice *= se->load.weight;
++ do_div(slice, cfs_rq->load.weight);
+
+- /*
+- * It will always fit into 'long':
+- */
+- return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
++ return slice;
+ }
+
+-static inline void
+-limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
++/*
++ * We calculate the vruntime slice.
++ *
++ * vs = s/w = p/rw
++ */
++static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+ {
+- long limit = sysctl_sched_runtime_limit;
++ u64 vslice = __sched_period(nr_running);
+
+- /*
+- * Niced tasks have the same history dynamic range as
+- * non-niced tasks:
+- */
+- if (unlikely(se->wait_runtime > limit)) {
+- se->wait_runtime = limit;
+- schedstat_inc(se, wait_runtime_overruns);
+- schedstat_inc(cfs_rq, wait_runtime_overruns);
+- }
+- if (unlikely(se->wait_runtime < -limit)) {
+- se->wait_runtime = -limit;
+- schedstat_inc(se, wait_runtime_underruns);
+- schedstat_inc(cfs_rq, wait_runtime_underruns);
+- }
++ vslice *= NICE_0_LOAD;
++ do_div(vslice, rq_weight);
++
++ return vslice;
+ }
+
+-static inline void
+-__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
++static u64 sched_vslice(struct cfs_rq *cfs_rq)
+ {
+- se->wait_runtime += delta;
+- schedstat_add(se, sum_wait_runtime, delta);
+- limit_wait_runtime(cfs_rq, se);
++ return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+ }
+
+-static void
+-add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
++static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
+- __add_wait_runtime(cfs_rq, se, delta);
+- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
++ return __sched_vslice(cfs_rq->load.weight + se->load.weight,
++ cfs_rq->nr_running + 1);
+ }
+
+ /*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+ static inline void
+-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
++__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
++ unsigned long delta_exec)
+ {
+- unsigned long delta, delta_exec, delta_fair, delta_mine;
+- struct load_weight *lw = &cfs_rq->load;
+- unsigned long load = lw->weight;
++ unsigned long delta_exec_weighted;
++ u64 vruntime;
+
+- delta_exec = curr->delta_exec;
+ schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+
+ curr->sum_exec_runtime += delta_exec;
+- cfs_rq->exec_clock += delta_exec;
+-
+- if (unlikely(!load))
+- return;
+-
+- delta_fair = calc_delta_fair(delta_exec, lw);
+- delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
+-
+- if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
+- delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
+- delta = min(delta, (unsigned long)(
+- (long)sysctl_sched_runtime_limit - curr->wait_runtime));
+- cfs_rq->sleeper_bonus -= delta;
+- delta_mine -= delta;
++ schedstat_add(cfs_rq, exec_clock, delta_exec);
++ delta_exec_weighted = delta_exec;
++ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
++ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
++ &curr->load);
+ }
++ curr->vruntime += delta_exec_weighted;
+
+- cfs_rq->fair_clock += delta_fair;
+ /*
+- * We executed delta_exec amount of time on the CPU,
+- * but we were only entitled to delta_mine amount of
+- * time during that period (if nr_running == 1 then
+- * the two values are equal)
+- * [Note: delta_mine - delta_exec is negative]:
++ * maintain cfs_rq->min_vruntime to be a monotonic increasing
++ * value tracking the leftmost vruntime in the tree.
+ */
+- add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
++ if (first_fair(cfs_rq)) {
++ vruntime = min_vruntime(curr->vruntime,
++ __pick_next_entity(cfs_rq)->vruntime);
++ } else
++ vruntime = curr->vruntime;
++
++ cfs_rq->min_vruntime =
++ max_vruntime(cfs_rq->min_vruntime, vruntime);
+ }
+
+ static void update_curr(struct cfs_rq *cfs_rq)
+ {
+- struct sched_entity *curr = cfs_rq_curr(cfs_rq);
++ struct sched_entity *curr = cfs_rq->curr;
++ u64 now = rq_of(cfs_rq)->clock;
+ unsigned long delta_exec;
+
+ if (unlikely(!curr))
+ return;
+
+ /*
+ * Get the amount of time the current task was running
+ * since the last time we changed load (this cannot
+ * overflow on 32 bits):
+ */
+- delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
++ delta_exec = (unsigned long)(now - curr->exec_start);
++
++ __update_curr(cfs_rq, curr, delta_exec);
++ curr->exec_start = now;
+
+- curr->delta_exec += delta_exec;
++ if (entity_is_task(curr)) {
++ struct task_struct *curtask = task_of(curr);
+
+- if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
+- __update_curr(cfs_rq, curr);
+- curr->delta_exec = 0;
++ cpuacct_charge(curtask, delta_exec);
+ }
+- curr->exec_start = rq_of(cfs_rq)->clock;
+ }
+
+ static inline void
+ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- se->wait_start_fair = cfs_rq->fair_clock;
+ schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+ }
+
+ /*
+- * We calculate fair deltas here, so protect against the random effects
+- * of a multiplication overflow by capping it to the runtime limit:
+- */
+-#if BITS_PER_LONG == 32
+-static inline unsigned long
+-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+-{
+- u64 tmp = (u64)delta * weight >> shift;
+-
+- if (unlikely(tmp > sysctl_sched_runtime_limit*2))
+- return sysctl_sched_runtime_limit*2;
+- return tmp;
+-}
+-#else
+-static inline unsigned long
+-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+-{
+- return delta * weight >> shift;
+-}
+-#endif
+-
+-/*
+ * Task is being enqueued - update stats:
+ */
+ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- s64 key;
+-
+ /*
+ * Are we enqueueing a waiting task? (for current tasks
+ * a dequeue/enqueue event is a NOP)
+ */
+- if (se != cfs_rq_curr(cfs_rq))
++ if (se != cfs_rq->curr)
+ update_stats_wait_start(cfs_rq, se);
+- /*
+- * Update the key:
+- */
+- key = cfs_rq->fair_clock;
+-
+- /*
+- * Optimize the common nice 0 case:
+- */
+- if (likely(se->load.weight == NICE_0_LOAD)) {
+- key -= se->wait_runtime;
+- } else {
+- u64 tmp;
+-
+- if (se->wait_runtime < 0) {
+- tmp = -se->wait_runtime;
+- key += (tmp * se->load.inv_weight) >>
+- (WMULT_SHIFT - NICE_0_SHIFT);
+- } else {
+- tmp = se->wait_runtime;
+- key -= (tmp * se->load.inv_weight) >>
+- (WMULT_SHIFT - NICE_0_SHIFT);
+- }
+- }
+-
+- se->fair_key = key;
+-}
+-
+-/*
+- * Note: must be called with a freshly updated rq->fair_clock.
+- */
+-static inline void
+-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- unsigned long delta_fair = se->delta_fair_run;
+-
+- schedstat_set(se->wait_max, max(se->wait_max,
+- rq_of(cfs_rq)->clock - se->wait_start));
+-
+- if (unlikely(se->load.weight != NICE_0_LOAD))
+- delta_fair = calc_weighted(delta_fair, se->load.weight,
+- NICE_0_SHIFT);
+-
+- add_wait_runtime(cfs_rq, se, delta_fair);
+ }
+
+ static void
+ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long delta_fair;
+-
+- if (unlikely(!se->wait_start_fair))
+- return;
+-
+- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+- (u64)(cfs_rq->fair_clock - se->wait_start_fair));
+-
+- se->delta_fair_run += delta_fair;
+- if (unlikely(abs(se->delta_fair_run) >=
+- sysctl_sched_stat_granularity)) {
+- __update_stats_wait_end(cfs_rq, se);
+- se->delta_fair_run = 0;
+- }
+-
+- se->wait_start_fair = 0;
++ schedstat_set(se->wait_max, max(se->wait_max,
++ rq_of(cfs_rq)->clock - se->wait_start));
+ schedstat_set(se->wait_start, 0);
+ }
+
+ static inline void
+ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- update_curr(cfs_rq);
+ /*
+ * Mark the end of the wait period if dequeueing a
+ * waiting task:
+ */
+- if (se != cfs_rq_curr(cfs_rq))
++ if (se != cfs_rq->curr)
+ update_stats_wait_end(cfs_rq, se);
+ }
+
+ /*
+ * We are picking a new current task - update its stats:
+@@ -540,83 +407,32 @@ update_stats_curr_start(struct cfs_rq *c
+ * We are starting a new run period:
+ */
+ se->exec_start = rq_of(cfs_rq)->clock;
+ }
+
+-/*
+- * We are descheduling a task - update its stats:
+- */
+-static inline void
+-update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- se->exec_start = 0;
+-}
+-
+ /**************************************************
+ * Scheduling class queueing methods:
+ */
+
+-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void
++account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long load = cfs_rq->load.weight, delta_fair;
+- long prev_runtime;
+-
+- /*
+- * Do not boost sleepers if there's too much bonus 'in flight'
+- * already:
+- */
+- if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+- return;
+-
+- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+- load = rq_of(cfs_rq)->cpu_load[2];
+-
+- delta_fair = se->delta_fair_sleep;
+-
+- /*
+- * Fix up delta_fair with the effect of us running
+- * during the whole sleep period:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+- delta_fair = div64_likely32((u64)delta_fair * load,
+- load + se->load.weight);
+-
+- if (unlikely(se->load.weight != NICE_0_LOAD))
+- delta_fair = calc_weighted(delta_fair, se->load.weight,
+- NICE_0_SHIFT);
+-
+- prev_runtime = se->wait_runtime;
+- __add_wait_runtime(cfs_rq, se, delta_fair);
+- delta_fair = se->wait_runtime - prev_runtime;
++ update_load_add(&cfs_rq->load, se->load.weight);
++ cfs_rq->nr_running++;
++ se->on_rq = 1;
++}
+
+- /*
+- * Track the amount of bonus we've given to sleepers:
+- */
+- cfs_rq->sleeper_bonus += delta_fair;
++static void
++account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++ update_load_sub(&cfs_rq->load, se->load.weight);
++ cfs_rq->nr_running--;
++ se->on_rq = 0;
+ }
+
+ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- struct task_struct *tsk = task_of(se);
+- unsigned long delta_fair;
+-
+- if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
+- !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+- return;
+-
+- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+- (u64)(cfs_rq->fair_clock - se->sleep_start_fair));
+-
+- se->delta_fair_sleep += delta_fair;
+- if (unlikely(abs(se->delta_fair_sleep) >=
+- sysctl_sched_stat_granularity)) {
+- __enqueue_sleeper(cfs_rq, se);
+- se->delta_fair_sleep = 0;
+- }
+-
+- se->sleep_start_fair = 0;
+-
+ #ifdef CONFIG_SCHEDSTATS
+ if (se->sleep_start) {
+ u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+
+ if ((s64)delta < 0)
+@@ -644,38 +460,99 @@ static void enqueue_sleeper(struct cfs_r
+ * Blocking time is in units of nanosecs, so shift by 20 to
+ * get a milliseconds-range estimation of the amount of
+ * time that the task spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ struct task_struct *tsk = task_of(se);
++
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
+ delta >> 20);
+ }
+ }
+ #endif
+ }
+
++static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++#ifdef CONFIG_SCHED_DEBUG
++ s64 d = se->vruntime - cfs_rq->min_vruntime;
++
++ if (d < 0)
++ d = -d;
++
++ if (d > 3*sysctl_sched_latency)
++ schedstat_inc(cfs_rq, nr_spread_over);
++#endif
++}
++
++static void
++place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
++{
++ u64 vruntime;
++
++ vruntime = cfs_rq->min_vruntime;
++
++ if (sched_feat(TREE_AVG)) {
++ struct sched_entity *last = __pick_last_entity(cfs_rq);
++ if (last) {
++ vruntime += last->vruntime;
++ vruntime >>= 1;
++ }
++ } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
++ vruntime += sched_vslice(cfs_rq)/2;
++
++ /*
++ * The 'current' period is already promised to the current tasks,
++ * however the extra weight of the new task will slow them down a
++ * little, place the new task so that it fits in the slot that
++ * stays open at the end.
++ */
++ if (initial && sched_feat(START_DEBIT))
++ vruntime += sched_vslice_add(cfs_rq, se);
++
++ if (!initial) {
++ /* sleeps of up to a single latency don't count. */
++ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
++ vruntime -= sysctl_sched_latency;
++
++ /* ensure we never gain time by being placed backwards. */
++ vruntime = max_vruntime(se->vruntime, vruntime);
++ }
++
++ se->vruntime = vruntime;
++}
++
+ static void
+ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+ {
+ /*
+- * Update the fair clock.
++ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+
+- if (wakeup)
++ if (wakeup) {
++ place_entity(cfs_rq, se, 0);
+ enqueue_sleeper(cfs_rq, se);
++ }
+
+ update_stats_enqueue(cfs_rq, se);
+- __enqueue_entity(cfs_rq, se);
++ check_spread(cfs_rq, se);
++ if (se != cfs_rq->curr)
++ __enqueue_entity(cfs_rq, se);
++ account_entity_enqueue(cfs_rq, se);
+ }
+
+ static void
+ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+ {
++ /*
++ * Update run-time statistics of the 'current'.
++ */
++ update_curr(cfs_rq);
++
+ update_stats_dequeue(cfs_rq, se);
+ if (sleep) {
+- se->sleep_start_fair = cfs_rq->fair_clock;
+ #ifdef CONFIG_SCHEDSTATS
+ if (entity_is_task(se)) {
+ struct task_struct *tsk = task_of(se);
+
+ if (tsk->state & TASK_INTERRUPTIBLE)
+@@ -683,72 +560,68 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ if (tsk->state & TASK_UNINTERRUPTIBLE)
+ se->block_start = rq_of(cfs_rq)->clock;
+ }
+ #endif
+ }
+- __dequeue_entity(cfs_rq, se);
++
++ if (se != cfs_rq->curr)
++ __dequeue_entity(cfs_rq, se);
++ account_entity_dequeue(cfs_rq, se);
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+ static void
+-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
+- struct sched_entity *curr, unsigned long granularity)
++check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ {
+- s64 __delta = curr->fair_key - se->fair_key;
+ unsigned long ideal_runtime, delta_exec;
+
+- /*
+- * ideal_runtime is compared against sum_exec_runtime, which is
+- * walltime, hence do not scale.
+- */
+- ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+- (unsigned long)sysctl_sched_min_granularity);
+-
+- /*
+- * If we executed more than what the latency constraint suggests,
+- * reduce the rescheduling granularity. This way the total latency
+- * of how much a task is not scheduled converges to
+- * sysctl_sched_latency:
+- */
++ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
+- granularity = 0;
+-
+- /*
+- * Take scheduling granularity into account - do not
+- * preempt the current task unless the best task has
+- * a larger than sched_granularity fairness advantage:
+- *
+- * scale granularity as key space is in fair_clock.
+- */
+- if (__delta > niced_granularity(curr, granularity))
+ resched_task(rq_of(cfs_rq)->curr);
+ }
+
+-static inline void
++static void
+ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- /*
+- * Any task has to be enqueued before it get to execute on
+- * a CPU. So account for the time it spent waiting on the
+- * runqueue. (note, here we rely on pick_next_task() having
+- * done a put_prev_task_fair() shortly before this, which
+- * updated rq->fair_clock - used by update_stats_wait_end())
+- */
+- update_stats_wait_end(cfs_rq, se);
++ /* 'current' is not kept within the tree. */
++ if (se->on_rq) {
++ /*
++ * Any task has to be enqueued before it gets to execute on
++ * a CPU. So account for the time it spent waiting on the
++ * runqueue.
++ */
++ update_stats_wait_end(cfs_rq, se);
++ __dequeue_entity(cfs_rq, se);
++ }
++
+ update_stats_curr_start(cfs_rq, se);
+- set_cfs_rq_curr(cfs_rq, se);
++ cfs_rq->curr = se;
++#ifdef CONFIG_SCHEDSTATS
++ /*
++ * Track our maximum slice length, if the CPU's load is at
++ * least twice that of our own weight (i.e. don't track it
++ * when there are only lesser-weight tasks around):
++ */
++ if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
++ se->slice_max = max(se->slice_max,
++ se->sum_exec_runtime - se->prev_sum_exec_runtime);
++ }
++#endif
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
+ }
+
+ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+ {
+- struct sched_entity *se = __pick_next_entity(cfs_rq);
++ struct sched_entity *se = NULL;
+
+- set_next_entity(cfs_rq, se);
++ if (first_fair(cfs_rq)) {
++ se = __pick_next_entity(cfs_rq);
++ set_next_entity(cfs_rq, se);
++ }
+
+ return se;
+ }
+
+ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+@@ -758,37 +631,28 @@ static void put_prev_entity(struct cfs_r
+ * was not called and update_curr() has to be done:
+ */
+ if (prev->on_rq)
+ update_curr(cfs_rq);
+
+- update_stats_curr_end(cfs_rq, prev);
+-
+- if (prev->on_rq)
++ check_spread(cfs_rq, prev);
++ if (prev->on_rq) {
+ update_stats_wait_start(cfs_rq, prev);
+- set_cfs_rq_curr(cfs_rq, NULL);
++ /* Put 'current' back into the tree. */
++ __enqueue_entity(cfs_rq, prev);
++ }
++ cfs_rq->curr = NULL;
+ }
+
+ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ {
+- struct sched_entity *next;
+-
+- /*
+- * Dequeue and enqueue the task to update its
+- * position within the tree:
+- */
+- dequeue_entity(cfs_rq, curr, 0);
+- enqueue_entity(cfs_rq, curr, 0);
+-
+ /*
+- * Reschedule if another task tops the current one.
++ * Update run-time statistics of the 'current'.
+ */
+- next = __pick_next_entity(cfs_rq);
+- if (next == curr)
+- return;
++ update_curr(cfs_rq);
+
+- __check_preempt_curr_fair(cfs_rq, next, curr,
+- sched_granularity(cfs_rq));
++ if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
++ check_preempt_tick(cfs_rq, curr);
+ }
+
+ /**************************************************
+ * CFS operations on tasks:
+ */
+@@ -819,27 +683,32 @@ static inline struct cfs_rq *group_cfs_r
+ /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+ {
+- /* A later patch will take group into account */
+- return &cpu_rq(this_cpu)->cfs;
++ return cfs_rq->tg->cfs_rq[this_cpu];
+ }
+
+ /* Iterate thr' all leaf cfs_rq's on a runqueue */
+ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+-/* Do the two (enqueued) tasks belong to the same group ? */
+-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
++/* Do the two (enqueued) entities belong to the same group ? */
++static inline int
++is_same_group(struct sched_entity *se, struct sched_entity *pse)
+ {
+- if (curr->se.cfs_rq == p->se.cfs_rq)
++ if (se->cfs_rq == pse->cfs_rq)
+ return 1;
+
+ return 0;
+ }
+
++static inline struct sched_entity *parent_entity(struct sched_entity *se)
++{
++ return se->parent;
++}
++
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+
+ #define for_each_sched_entity(se) \
+ for (; se; se = NULL)
+
+@@ -868,15 +737,21 @@ static inline struct cfs_rq *cpu_cfs_rq(
+ }
+
+ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
++static inline int
++is_same_group(struct sched_entity *se, struct sched_entity *pse)
+ {
+ return 1;
+ }
+
++static inline struct sched_entity *parent_entity(struct sched_entity *se)
++{
++ return NULL;
++}
++
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ /*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+@@ -890,10 +765,11 @@ static void enqueue_task_fair(struct rq
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+ break;
+ cfs_rq = cfs_rq_of(se);
+ enqueue_entity(cfs_rq, se, wakeup);
++ wakeup = 1;
+ }
+ }
+
+ /*
+ * The dequeue_task method is called before nr_running is
+@@ -909,97 +785,95 @@ static void dequeue_task_fair(struct rq
+ cfs_rq = cfs_rq_of(se);
+ dequeue_entity(cfs_rq, se, sleep);
+ /* Don't dequeue parent if it has other entities besides us */
+ if (cfs_rq->load.weight)
+ break;
++ sleep = 1;
+ }
+ }
+
+ /*
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
+ */
+-static void yield_task_fair(struct rq *rq, struct task_struct *p)
++static void yield_task_fair(struct rq *rq)
+ {
+- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+- struct sched_entity *rightmost, *se = &p->se;
+- struct rb_node *parent;
++ struct task_struct *curr = rq->curr;
++ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
++ struct sched_entity *rightmost, *se = &curr->se;
+
+ /*
+ * Are we the only task in the tree?
+ */
+ if (unlikely(cfs_rq->nr_running == 1))
+ return;
+
+- if (likely(!sysctl_sched_compat_yield)) {
++ if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
+ __update_rq_clock(rq);
+ /*
+- * Dequeue and enqueue the task to update its
+- * position within the tree:
++ * Update run-time statistics of the 'current'.
+ */
+- dequeue_entity(cfs_rq, &p->se, 0);
+- enqueue_entity(cfs_rq, &p->se, 0);
++ update_curr(cfs_rq);
+
+ return;
+ }
+ /*
+ * Find the rightmost entry in the rbtree:
+ */
+- do {
+- parent = *link;
+- link = &parent->rb_right;
+- } while (*link);
+-
+- rightmost = rb_entry(parent, struct sched_entity, run_node);
++ rightmost = __pick_last_entity(cfs_rq);
+ /*
+ * Already in the rightmost position?
+ */
+- if (unlikely(rightmost == se))
++ if (unlikely(rightmost->vruntime < se->vruntime))
+ return;
+
+ /*
+ * Minimally necessary key value to be last in the tree:
++ * Upon rescheduling, sched_class::put_prev_task() will place
++ * 'current' within the tree based on its new key value.
+ */
+- se->fair_key = rightmost->fair_key + 1;
+-
+- if (cfs_rq->rb_leftmost == &se->run_node)
+- cfs_rq->rb_leftmost = rb_next(&se->run_node);
+- /*
+- * Relink the task to the rightmost position:
+- */
+- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+- rb_link_node(&se->run_node, parent, link);
+- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
++ se->vruntime = rightmost->vruntime + 1;
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
++static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
+ {
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
++ struct sched_entity *se = &curr->se, *pse = &p->se;
+ unsigned long gran;
+
+ if (unlikely(rt_prio(p->prio))) {
+ update_rq_clock(rq);
+ update_curr(cfs_rq);
+ resched_task(curr);
+ return;
+ }
+-
+- gran = sysctl_sched_wakeup_granularity;
+ /*
+- * Batch tasks prefer throughput over latency:
++ * Batch tasks do not preempt (their preemption is driven by
++ * the tick):
+ */
+ if (unlikely(p->policy == SCHED_BATCH))
+- gran = sysctl_sched_batch_wakeup_granularity;
++ return;
++
++ if (!sched_feat(WAKEUP_PREEMPT))
++ return;
++
++ while (!is_same_group(se, pse)) {
++ se = parent_entity(se);
++ pse = parent_entity(pse);
++ }
+
+- if (is_same_group(curr, p))
+- __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
++ gran = sysctl_sched_wakeup_granularity;
++ if (unlikely(se->load.weight != NICE_0_LOAD))
++ gran = calc_delta_fair(gran, &se->load);
++
++ if (pse->vruntime + gran < se->vruntime)
++ resched_task(curr);
+ }
+
+ static struct task_struct *pick_next_task_fair(struct rq *rq)
+ {
+ struct cfs_rq *cfs_rq = &rq->cfs;
+@@ -1028,10 +902,11 @@ static void put_prev_task_fair(struct rq
+ cfs_rq = cfs_rq_of(se);
+ put_prev_entity(cfs_rq, se);
+ }
+ }
+
++#ifdef CONFIG_SMP
+ /**************************************************
+ * Fair scheduling class load-balancing methods:
+ */
+
+ /*
+@@ -1039,11 +914,11 @@ static void put_prev_task_fair(struct rq
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+-static inline struct task_struct *
++static struct task_struct *
+ __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+ {
+ struct task_struct *p;
+
+ if (!curr)
+@@ -1076,25 +951,27 @@ static int cfs_rq_best_prio(struct cfs_r
+ struct task_struct *p;
+
+ if (!cfs_rq->nr_running)
+ return MAX_PRIO;
+
+- curr = __pick_next_entity(cfs_rq);
++ curr = cfs_rq->curr;
++ if (!curr)
++ curr = __pick_next_entity(cfs_rq);
++
+ p = task_of(curr);
+
+ return p->prio;
+ }
+ #endif
+
+ static unsigned long
+ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
++ unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio)
+ {
+ struct cfs_rq *busy_cfs_rq;
+- unsigned long load_moved, total_nr_moved = 0, nr_moved;
+ long rem_load_move = max_load_move;
+ struct rq_iterator cfs_rq_iterator;
+
+ cfs_rq_iterator.start = load_balance_start_fair;
+ cfs_rq_iterator.next = load_balance_next_fair;
+@@ -1118,29 +995,52 @@ load_balance_fair(struct rq *this_rq, in
+
+ *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+ #else
+ # define maxload rem_load_move
+ #endif
+- /* pass busy_cfs_rq argument into
++ /*
++ * pass busy_cfs_rq argument into
+ * load_balance_[start|next]_fair iterators
+ */
+ cfs_rq_iterator.arg = busy_cfs_rq;
+- nr_moved = balance_tasks(this_rq, this_cpu, busiest,
+- max_nr_move, maxload, sd, idle, all_pinned,
+- &load_moved, this_best_prio, &cfs_rq_iterator);
+-
+- total_nr_moved += nr_moved;
+- max_nr_move -= nr_moved;
+- rem_load_move -= load_moved;
++ rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
++ maxload, sd, idle, all_pinned,
++ this_best_prio,
++ &cfs_rq_iterator);
+
+- if (max_nr_move <= 0 || rem_load_move <= 0)
++ if (rem_load_move <= 0)
+ break;
+ }
+
+ return max_load_move - rem_load_move;
+ }
+
++static int
++move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ struct cfs_rq *busy_cfs_rq;
++ struct rq_iterator cfs_rq_iterator;
++
++ cfs_rq_iterator.start = load_balance_start_fair;
++ cfs_rq_iterator.next = load_balance_next_fair;
++
++ for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
++ /*
++ * pass busy_cfs_rq argument into
++ * load_balance_[start|next]_fair iterators
++ */
++ cfs_rq_iterator.arg = busy_cfs_rq;
++ if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
++ &cfs_rq_iterator))
++ return 1;
++ }
++
++ return 0;
++}
++#endif
++
+ /*
+ * scheduler tick hitting a task of our scheduling class:
+ */
+ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+ {
+@@ -1151,51 +1051,44 @@ static void task_tick_fair(struct rq *rq
+ cfs_rq = cfs_rq_of(se);
+ entity_tick(cfs_rq, se);
+ }
+ }
+
++#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
++
+ /*
+ * Share the fairness runtime between parent and child, thus the
+ * total amount of pressure for CPU stays equal - new tasks
+ * get a chance to run but frequent forkers are not allowed to
+ * monopolize the CPU. Note: the parent runqueue is locked,
+ * the child is not running yet.
+ */
+ static void task_new_fair(struct rq *rq, struct task_struct *p)
+ {
+ struct cfs_rq *cfs_rq = task_cfs_rq(p);
+- struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
++ struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
++ int this_cpu = smp_processor_id();
+
+ sched_info_queued(p);
+
+ update_curr(cfs_rq);
+- update_stats_enqueue(cfs_rq, se);
+- /*
+- * Child runs first: we let it run before the parent
+- * until it reschedules once. We set up the key so that
+- * it will preempt the parent:
+- */
+- se->fair_key = curr->fair_key -
+- niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
+- /*
+- * The first wait is dominated by the child-runs-first logic,
+- * so do not credit it with that waiting time yet:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
+- se->wait_start_fair = 0;
++ place_entity(cfs_rq, se, 1);
+
+- /*
+- * The statistical average of wait_runtime is about
+- * -granularity/2, so initialize the task with that:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+- se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
++ /* 'curr' will be NULL if the child belongs to a different group */
++ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
++ curr && curr->vruntime < se->vruntime) {
++ /*
++ * Upon rescheduling, sched_class::put_prev_task() will place
++ * 'current' within the tree based on its new key value.
++ */
++ swap(curr->vruntime, se->vruntime);
++ }
+
+- __enqueue_entity(cfs_rq, se);
++ enqueue_task_fair(rq, p, 0);
++ resched_task(rq->curr);
+ }
+
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+@@ -1204,30 +1097,29 @@ static void set_curr_task_fair(struct rq
+ struct sched_entity *se = &rq->curr->se;
+
+ for_each_sched_entity(se)
+ set_next_entity(cfs_rq_of(se), se);
+ }
+-#else
+-static void set_curr_task_fair(struct rq *rq)
+-{
+-}
+-#endif
+
+ /*
+ * All the scheduling class methods:
+ */
+-struct sched_class fair_sched_class __read_mostly = {
++static const struct sched_class fair_sched_class = {
++ .next = &idle_sched_class,
+ .enqueue_task = enqueue_task_fair,
+ .dequeue_task = dequeue_task_fair,
+ .yield_task = yield_task_fair,
+
+- .check_preempt_curr = check_preempt_curr_fair,
++ .check_preempt_curr = check_preempt_wakeup,
+
+ .pick_next_task = pick_next_task_fair,
+ .put_prev_task = put_prev_task_fair,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_fair,
++ .move_one_task = move_one_task_fair,
++#endif
+
+ .set_curr_task = set_curr_task_fair,
+ .task_tick = task_tick_fair,
+ .task_new = task_new_fair,
+ };
+@@ -1235,9 +1127,12 @@ struct sched_class fair_sched_class __re
+ #ifdef CONFIG_SCHED_DEBUG
+ static void print_cfs_stats(struct seq_file *m, int cpu)
+ {
+ struct cfs_rq *cfs_rq;
+
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
++#endif
+ for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+ print_cfs_rq(m, cpu, cfs_rq);
+ }
+ #endif
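
The heart of the sched_fair.c rewrite is the replacement of the wait_runtime bookkeeping with the period/slice/vruntime formulas documented in the new comments: p = (nr <= nl) ? l : l*nr/nl, s = p*w/rw and vs = p/rw. To make those concrete, here is a stand-alone calculation for three runnable tasks; the weights are taken from the kernel's nice-to-weight table (1024 for nice 0, 335 for nice 5) and are quoted only for the sake of the example:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC	1000000ULL
#define NICE_0_LOAD	1024UL

static const uint64_t sysctl_sched_latency = 20 * NSEC_PER_MSEC;
static const unsigned long sched_nr_latency = 5;   /* latency / min_granularity */

/* p = (nr <= nl) ? l : l*nr/nl */
static uint64_t sched_period(unsigned long nr_running)
{
	uint64_t period = sysctl_sched_latency;

	if (nr_running > sched_nr_latency)
		period = period * nr_running / sched_nr_latency;
	return period;
}

/* s = p*w/rw: wall-time slice proportional to the entity's weight */
static uint64_t sched_slice(unsigned long weight, unsigned long rq_weight,
			    unsigned long nr_running)
{
	return sched_period(nr_running) * weight / rq_weight;
}

/* vs = p/rw (scaled by NICE_0_LOAD): the slice expressed in vruntime units */
static uint64_t sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	return sched_period(nr_running) * NICE_0_LOAD / rq_weight;
}

int main(void)
{
	/* two nice-0 tasks (weight 1024) and one nice-5 task (weight 335) */
	unsigned long w[] = { 1024, 1024, 335 };
	unsigned long rq_weight = 1024 + 1024 + 335;
	int i;

	for (i = 0; i < 3; i++)
		printf("task %d: wall slice = %.3f ms\n", i,
		       sched_slice(w[i], rq_weight, 3) / 1e6);
	printf("vslice (same for every entity) = %.3f ms\n",
	       sched_vslice(rq_weight, 3) / 1e6);
	return 0;
}

With three tasks the period stays at the 20 ms default, the two nice-0 tasks get about 8.59 ms each and the nice-5 task about 2.81 ms, so the slices add back up to the period (modulo integer rounding). The vslice comes out equal to a nice-0 task's wall slice, which is the point: every entity's vruntime advances by the same amount over one full period regardless of weight, so ordering the rbtree by vruntime (offset by min_vruntime, as entity_key() does) approximates fair service.
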
+--- linux-2.6.23.orig/kernel/sched_idletask.c
++++ linux-2.6.23/kernel/sched_idletask.c
+@@ -35,37 +35,55 @@ dequeue_task_idle(struct rq *rq, struct
+
+ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+ {
+ }
+
++#ifdef CONFIG_SMP
+ static unsigned long
+ load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, int *this_best_prio)
++ unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned, int *this_best_prio)
+ {
+ return 0;
+ }
+
++static int
++move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ return 0;
++}
++#endif
++
+ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+ {
+ }
+
++static void set_curr_task_idle(struct rq *rq)
++{
++}
++
+ /*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+-static struct sched_class idle_sched_class __read_mostly = {
++const struct sched_class idle_sched_class = {
++ /* .next is NULL */
+ /* no enqueue/yield_task for idle tasks */
+
+ /* dequeue is not valid, we print a debug message there: */
+ .dequeue_task = dequeue_task_idle,
+
+ .check_preempt_curr = check_preempt_curr_idle,
+
+ .pick_next_task = pick_next_task_idle,
+ .put_prev_task = put_prev_task_idle,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_idle,
++ .move_one_task = move_one_task_idle,
++#endif
+
++ .set_curr_task = set_curr_task_idle,
+ .task_tick = task_tick_idle,
+ /* no .task_new for idle tasks */
+ };
+--- linux-2.6.23.orig/kernel/sched_rt.c
++++ linux-2.6.23/kernel/sched_rt.c
+@@ -5,11 +5,11 @@
+
+ /*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+-static inline void update_curr_rt(struct rq *rq)
++static void update_curr_rt(struct rq *rq)
+ {
+ struct task_struct *curr = rq->curr;
+ u64 delta_exec;
+
+ if (!task_has_rt_policy(curr))
+@@ -21,10 +21,11 @@ static inline void update_curr_rt(struct
+
+ schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
+
+ curr->se.sum_exec_runtime += delta_exec;
+ curr->se.exec_start = rq->clock;
++ cpuacct_charge(curr, delta_exec);
+ }
+
+ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+ {
+ struct rt_prio_array *array = &rq->rt.active;
+@@ -57,13 +58,13 @@ static void requeue_task_rt(struct rq *r
+
+ list_move_tail(&p->run_list, array->queue + p->prio);
+ }
+
+ static void
+-yield_task_rt(struct rq *rq, struct task_struct *p)
++yield_task_rt(struct rq *rq)
+ {
+- requeue_task_rt(rq, p);
++ requeue_task_rt(rq, rq->curr);
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+@@ -96,10 +97,11 @@ static void put_prev_task_rt(struct rq *
+ {
+ update_curr_rt(rq);
+ p->se.exec_start = 0;
+ }
+
++#ifdef CONFIG_SMP
+ /*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+@@ -170,45 +172,57 @@ static struct task_struct *load_balance_
+ return p;
+ }
+
+ static unsigned long
+ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, int *this_best_prio)
++ unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned, int *this_best_prio)
+ {
+- int nr_moved;
+ struct rq_iterator rt_rq_iterator;
+- unsigned long load_moved;
+
+ rt_rq_iterator.start = load_balance_start_rt;
+ rt_rq_iterator.next = load_balance_next_rt;
+ /* pass 'busiest' rq argument into
+ * load_balance_[start|next]_rt iterators
+ */
+ rt_rq_iterator.arg = busiest;
+
+- nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
+- max_load_move, sd, idle, all_pinned, &load_moved,
+- this_best_prio, &rt_rq_iterator);
++ return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
++ idle, all_pinned, this_best_prio, &rt_rq_iterator);
++}
++
++static int
++move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ struct rq_iterator rt_rq_iterator;
++
++ rt_rq_iterator.start = load_balance_start_rt;
++ rt_rq_iterator.next = load_balance_next_rt;
++ rt_rq_iterator.arg = busiest;
+
+- return load_moved;
++ return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
++ &rt_rq_iterator);
+ }
++#endif
+
+ static void task_tick_rt(struct rq *rq, struct task_struct *p)
+ {
++ update_curr_rt(rq);
++
+ /*
+ * RR tasks need a special form of timeslice management.
+ * FIFO tasks have no timeslices.
+ */
+ if (p->policy != SCHED_RR)
+ return;
+
+ if (--p->time_slice)
+ return;
+
+- p->time_slice = static_prio_timeslice(p->static_prio);
++ p->time_slice = DEF_TIMESLICE;
+
+ /*
+ * Requeue to the end of queue if we are not the only element
+ * on the queue:
+ */
+@@ -216,19 +230,31 @@ static void task_tick_rt(struct rq *rq,
+ requeue_task_rt(rq, p);
+ set_tsk_need_resched(p);
+ }
+ }
+
+-static struct sched_class rt_sched_class __read_mostly = {
++static void set_curr_task_rt(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ p->se.exec_start = rq->clock;
++}
++
++const struct sched_class rt_sched_class = {
++ .next = &fair_sched_class,
+ .enqueue_task = enqueue_task_rt,
+ .dequeue_task = dequeue_task_rt,
+ .yield_task = yield_task_rt,
+
+ .check_preempt_curr = check_preempt_curr_rt,
+
+ .pick_next_task = pick_next_task_rt,
+ .put_prev_task = put_prev_task_rt,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_rt,
++ .move_one_task = move_one_task_rt,
++#endif
+
++ .set_curr_task = set_curr_task_rt,
+ .task_tick = task_tick_rt,
+ };
+--- linux-2.6.23.orig/kernel/sched_stats.h
++++ linux-2.6.23/kernel/sched_stats.h
+@@ -14,22 +14,22 @@ static int show_schedstat(struct seq_fil
+ seq_printf(seq, "timestamp %lu\n", jiffies);
+ for_each_online_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ #ifdef CONFIG_SMP
+ struct sched_domain *sd;
+- int dcnt = 0;
++ int dcount = 0;
+ #endif
+
+ /* runqueue-specific stats */
+ seq_printf(seq,
+- "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
++ "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
+ cpu, rq->yld_both_empty,
+- rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
+- rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
+- rq->ttwu_cnt, rq->ttwu_local,
++ rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
++ rq->sched_switch, rq->sched_count, rq->sched_goidle,
++ rq->ttwu_count, rq->ttwu_local,
+ rq->rq_sched_info.cpu_time,
+- rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
++ rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+
+ seq_printf(seq, "\n");
+
+ #ifdef CONFIG_SMP
+ /* domain-specific stats */
+@@ -37,29 +37,28 @@ static int show_schedstat(struct seq_fil
+ for_each_domain(cpu, sd) {
+ enum cpu_idle_type itype;
+ char mask_str[NR_CPUS];
+
+ cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+- seq_printf(seq, "domain%d %s", dcnt++, mask_str);
++ seq_printf(seq, "domain%d %s", dcount++, mask_str);
+ for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+ itype++) {
+- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+- "%lu",
+- sd->lb_cnt[itype],
++ seq_printf(seq, " %u %u %u %u %u %u %u %u",
++ sd->lb_count[itype],
+ sd->lb_balanced[itype],
+ sd->lb_failed[itype],
+ sd->lb_imbalance[itype],
+ sd->lb_gained[itype],
+ sd->lb_hot_gained[itype],
+ sd->lb_nobusyq[itype],
+ sd->lb_nobusyg[itype]);
+ }
+- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+- " %lu %lu %lu\n",
+- sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
+- sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+- sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
++ seq_printf(seq,
++ " %u %u %u %u %u %u %u %u %u %u %u %u\n",
++ sd->alb_count, sd->alb_failed, sd->alb_pushed,
++ sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
++ sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
+ sd->ttwu_wake_remote, sd->ttwu_move_affine,
+ sd->ttwu_move_balance);
+ }
+ preempt_enable();
+ #endif
+@@ -99,11 +98,11 @@ const struct file_operations proc_scheds
+ static inline void
+ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+ {
+ if (rq) {
+ rq->rq_sched_info.run_delay += delta;
+- rq->rq_sched_info.pcnt++;
++ rq->rq_sched_info.pcount++;
+ }
+ }
+
+ /*
+ * Expects runqueue lock to be held for atomicity of update
+@@ -155,18 +154,18 @@ static inline void sched_info_dequeued(s
+ * long it was waiting to run. We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+ static void sched_info_arrive(struct task_struct *t)
+ {
+- unsigned long long now = sched_clock(), delta = 0;
++ unsigned long long now = task_rq(t)->clock, delta = 0;
+
+ if (t->sched_info.last_queued)
+ delta = now - t->sched_info.last_queued;
+ sched_info_dequeued(t);
+ t->sched_info.run_delay += delta;
+ t->sched_info.last_arrival = now;
+- t->sched_info.pcnt++;
++ t->sched_info.pcount++;
+
+ rq_sched_info_arrive(task_rq(t), delta);
+ }
+
+ /*
+@@ -186,20 +185,21 @@ static void sched_info_arrive(struct tas
+ */
+ static inline void sched_info_queued(struct task_struct *t)
+ {
+ if (unlikely(sched_info_on()))
+ if (!t->sched_info.last_queued)
+- t->sched_info.last_queued = sched_clock();
++ t->sched_info.last_queued = task_rq(t)->clock;
+ }
+
+ /*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily. Now we can calculate how long we ran.
+ */
+ static inline void sched_info_depart(struct task_struct *t)
+ {
+- unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
++ unsigned long long delta = task_rq(t)->clock -
++ t->sched_info.last_arrival;
+
+ t->sched_info.cpu_time += delta;
+ rq_sched_info_depart(task_rq(t), delta);
+ }
+
+--- linux-2.6.23.orig/kernel/sysctl.c
++++ linux-2.6.23/kernel/sysctl.c
+@@ -211,35 +211,35 @@ static ctl_table root_table[] = {
+ { .ctl_name = 0 }
+ };
+
+ #ifdef CONFIG_SCHED_DEBUG
+ static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
+-static unsigned long max_sched_granularity_ns = 1000000000; /* 1 second */
++static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
+-static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
++static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+ #endif
+
+-static ctl_table kern_table[] = {
++static struct ctl_table kern_table[] = {
+ #ifdef CONFIG_SCHED_DEBUG
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_min_granularity_ns",
+ .data = &sysctl_sched_min_granularity,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &sched_nr_latency_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_sched_granularity_ns,
+ .extra2 = &max_sched_granularity_ns,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_latency_ns",
+ .data = &sysctl_sched_latency,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &sched_nr_latency_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_sched_granularity_ns,
+ .extra2 = &max_sched_granularity_ns,
+ },
+ {
+@@ -264,47 +264,43 @@ static ctl_table kern_table[] = {
+ .extra1 = &min_wakeup_granularity_ns,
+ .extra2 = &max_wakeup_granularity_ns,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_stat_granularity_ns",
+- .data = &sysctl_sched_stat_granularity,
++ .procname = "sched_child_runs_first",
++ .data = &sysctl_sched_child_runs_first,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
+- .strategy = &sysctl_intvec,
+- .extra1 = &min_wakeup_granularity_ns,
+- .extra2 = &max_wakeup_granularity_ns,
++ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_runtime_limit_ns",
+- .data = &sysctl_sched_runtime_limit,
++ .procname = "sched_features",
++ .data = &sysctl_sched_features,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
+- .strategy = &sysctl_intvec,
+- .extra1 = &min_sched_granularity_ns,
+- .extra2 = &max_sched_granularity_ns,
++ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_child_runs_first",
+- .data = &sysctl_sched_child_runs_first,
++ .procname = "sched_migration_cost",
++ .data = &sysctl_sched_migration_cost,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_features",
+- .data = &sysctl_sched_features,
++ .procname = "sched_nr_migrate",
++ .data = &sysctl_sched_nr_migrate,
+ .maxlen = sizeof(unsigned int),
+- .mode = 0644,
++ .mode = 644,
+ .proc_handler = &proc_dointvec,
+ },
+ #endif
++#endif
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_compat_yield",
+ .data = &sysctl_sched_compat_yield,
+ .maxlen = sizeof(unsigned int),
+--- linux-2.6.23.orig/kernel/timer.c
++++ linux-2.6.23/kernel/timer.c
+@@ -824,14 +824,17 @@ void update_process_times(int user_tick)
+ {
+ struct task_struct *p = current;
+ int cpu = smp_processor_id();
+
+ /* Note: this timer irq context must be accounted for as well. */
+- if (user_tick)
++ if (user_tick) {
+ account_user_time(p, jiffies_to_cputime(1));
+- else
++ account_user_time_scaled(p, jiffies_to_cputime(1));
++ } else {
+ account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
++ account_system_time_scaled(p, jiffies_to_cputime(1));
++ }
+ run_local_timers();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_tick);
+ scheduler_tick();
+ run_posix_cpu_timers(p);
+--- linux-2.6.23.orig/kernel/tsacct.c
++++ linux-2.6.23/kernel/tsacct.c
+@@ -60,10 +60,14 @@ void bacct_add_tsk(struct taskstats *sta
+ stats->ac_ppid = pid_alive(tsk) ?
+ rcu_dereference(tsk->real_parent)->tgid : 0;
+ rcu_read_unlock();
+ stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
+ stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
++ stats->ac_utimescaled =
++ cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
++ stats->ac_stimescaled =
++ cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
+ stats->ac_minflt = tsk->min_flt;
+ stats->ac_majflt = tsk->maj_flt;
+
+ strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
+ }
+--- linux-2.6.23.orig/kernel/user.c
++++ linux-2.6.23/kernel/user.c
+@@ -48,40 +48,242 @@ struct user_struct root_user = {
+ .locked_shm = 0,
+ #ifdef CONFIG_KEYS
+ .uid_keyring = &root_user_keyring,
+ .session_keyring = &root_session_keyring,
+ #endif
++#ifdef CONFIG_FAIR_USER_SCHED
++ .tg = &init_task_group,
++#endif
+ };
+
+ /*
+ * These routines must be called with the uidhash spinlock held!
+ */
+-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
++static inline void uid_hash_insert(struct user_struct *up,
++ struct hlist_head *hashent)
+ {
+ hlist_add_head(&up->uidhash_node, hashent);
+ }
+
+ static inline void uid_hash_remove(struct user_struct *up)
+ {
+ hlist_del_init(&up->uidhash_node);
+ }
+
+-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
++static inline struct user_struct *uid_hash_find(uid_t uid,
++ struct hlist_head *hashent)
+ {
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+- if(user->uid == uid) {
++ if (user->uid == uid) {
+ atomic_inc(&user->__count);
+ return user;
+ }
+ }
+
+ return NULL;
+ }
+
++#ifdef CONFIG_FAIR_USER_SCHED
++
++static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
++static DEFINE_MUTEX(uids_mutex);
++
++static void sched_destroy_user(struct user_struct *up)
++{
++ sched_destroy_group(up->tg);
++}
++
++static int sched_create_user(struct user_struct *up)
++{
++ int rc = 0;
++
++ up->tg = sched_create_group();
++ if (IS_ERR(up->tg))
++ rc = -ENOMEM;
++
++ return rc;
++}
++
++static void sched_switch_user(struct task_struct *p)
++{
++ sched_move_task(p);
++}
++
++static inline void uids_mutex_lock(void)
++{
++ mutex_lock(&uids_mutex);
++}
++
++static inline void uids_mutex_unlock(void)
++{
++ mutex_unlock(&uids_mutex);
++}
++
++/* return cpu shares held by the user */
++ssize_t cpu_shares_show(struct kset *kset, char *buffer)
++{
++ struct user_struct *up = container_of(kset, struct user_struct, kset);
++
++ return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
++}
++
++/* modify cpu shares held by the user */
++ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
++{
++ struct user_struct *up = container_of(kset, struct user_struct, kset);
++ unsigned long shares;
++ int rc;
++
++ sscanf(buffer, "%lu", &shares);
++
++ rc = sched_group_set_shares(up->tg, shares);
++
++ return (rc ? rc : size);
++}
++
++static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
++{
++ sa->attr.name = name; sa->attr.owner = NULL;
++ sa->attr.mode = mode;
++ sa->show = cpu_shares_show;
++ sa->store = cpu_shares_store;
++}
++
++/* Create "/sys/kernel/uids/<uid>" directory and
++ * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
++ */
++static int user_kobject_create(struct user_struct *up)
++{
++ struct kset *kset = &up->kset;
++ struct kobject *kobj = &kset->kobj;
++ int error;
++
++ memset(kset, 0, sizeof(struct kset));
++ kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
++ kobject_set_name(kobj, "%d", up->uid);
++ kset_init(kset);
++ user_attr_init(&up->user_attr, "cpu_share", 0644);
++
++ error = kobject_add(kobj);
++ if (error)
++ goto done;
++
++ error = sysfs_create_file(kobj, &up->user_attr.attr);
++ if (error)
++ kobject_del(kobj);
++
++ kobject_uevent(kobj, KOBJ_ADD);
++
++done:
++ return error;
++}
++
++/* create these in sysfs filesystem:
++ * "/sys/kernel/uids" directory
++ * "/sys/kernel/uids/0" directory (for root user)
++ * "/sys/kernel/uids/0/cpu_share" file (for root user)
++ */
++int __init uids_kobject_init(void)
++{
++ int error;
++
++ /* create under /sys/kernel dir */
++ uids_kobject.parent = &kernel_subsys.kobj;
++ uids_kobject.kset = &kernel_subsys;
++ kobject_set_name(&uids_kobject, "uids");
++ kobject_init(&uids_kobject);
++
++ error = kobject_add(&uids_kobject);
++ if (!error)
++ error = user_kobject_create(&root_user);
++
++ return error;
++}
++
++/* work function to remove sysfs directory for a user and free up
++ * corresponding structures.
++ */
++static void remove_user_sysfs_dir(struct work_struct *w)
++{
++ struct user_struct *up = container_of(w, struct user_struct, work);
++ struct kobject *kobj = &up->kset.kobj;
++ unsigned long flags;
++ int remove_user = 0;
++
++ /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
++ * atomic.
++ */
++ uids_mutex_lock();
++
++ local_irq_save(flags);
++
++ if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
++ uid_hash_remove(up);
++ remove_user = 1;
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++ } else {
++ local_irq_restore(flags);
++ }
++
++ if (!remove_user)
++ goto done;
++
++ sysfs_remove_file(kobj, &up->user_attr.attr);
++ kobject_uevent(kobj, KOBJ_REMOVE);
++ kobject_del(kobj);
++
++ sched_destroy_user(up);
++ key_put(up->uid_keyring);
++ key_put(up->session_keyring);
++ kmem_cache_free(uid_cachep, up);
++
++done:
++ uids_mutex_unlock();
++}
++
++/* IRQs are disabled and uidhash_lock is held upon function entry.
++ * IRQ state (as stored in flags) is restored and uidhash_lock released
++ * upon function exit.
++ */
++static inline void free_user(struct user_struct *up, unsigned long flags)
++{
++ /* restore back the count */
++ atomic_inc(&up->__count);
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++
++ INIT_WORK(&up->work, remove_user_sysfs_dir);
++ schedule_work(&up->work);
++}
++
++#else /* CONFIG_FAIR_USER_SCHED */
++
++static void sched_destroy_user(struct user_struct *up) { }
++static int sched_create_user(struct user_struct *up) { return 0; }
++static void sched_switch_user(struct task_struct *p) { }
++static inline int user_kobject_create(struct user_struct *up) { return 0; }
++static inline void uids_mutex_lock(void) { }
++static inline void uids_mutex_unlock(void) { }
++
++/* IRQs are disabled and uidhash_lock is held upon function entry.
++ * IRQ state (as stored in flags) is restored and uidhash_lock released
++ * upon function exit.
++ */
++static inline void free_user(struct user_struct *up, unsigned long flags)
++{
++ uid_hash_remove(up);
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++ sched_destroy_user(up);
++ key_put(up->uid_keyring);
++ key_put(up->session_keyring);
++ kmem_cache_free(uid_cachep, up);
++}
++
++#endif /* CONFIG_FAIR_USER_SCHED */
++
+ /*
+ * Locate the user_struct for the passed UID. If found, take a ref on it. The
+ * caller must undo that ref with free_uid().
+ *
+ * If the user_struct could not be found, return NULL.
+@@ -104,26 +306,26 @@ void free_uid(struct user_struct *up)
+
+ if (!up)
+ return;
+
+ local_irq_save(flags);
+- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+- uid_hash_remove(up);
+- spin_unlock_irqrestore(&uidhash_lock, flags);
+- key_put(up->uid_keyring);
+- key_put(up->session_keyring);
+- kmem_cache_free(uid_cachep, up);
+- } else {
++ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
++ free_user(up, flags);
++ else
+ local_irq_restore(flags);
+- }
+ }
+
+ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+ {
+ struct hlist_head *hashent = uidhashentry(ns, uid);
+ struct user_struct *up;
+
++ /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
++ * atomic.
++ */
++ uids_mutex_lock();
++
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ spin_unlock_irq(&uidhash_lock);
+
+ if (!up) {
+@@ -148,27 +350,51 @@ struct user_struct * alloc_uid(struct us
+ if (alloc_uid_keyring(new, current) < 0) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
+
++ if (sched_create_user(new) < 0) {
++ key_put(new->uid_keyring);
++ key_put(new->session_keyring);
++ kmem_cache_free(uid_cachep, new);
++ return NULL;
++ }
++
++ if (user_kobject_create(new)) {
++ sched_destroy_user(new);
++ key_put(new->uid_keyring);
++ key_put(new->session_keyring);
++ kmem_cache_free(uid_cachep, new);
++ uids_mutex_unlock();
++ return NULL;
++ }
++
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ if (up) {
++ /* This case is not possible when CONFIG_FAIR_USER_SCHED
++ * is defined, since we serialize alloc_uid() using
++ * uids_mutex. Hence no need to call
++ * sched_destroy_user() or remove_user_sysfs_dir().
++ */
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock_irq(&uidhash_lock);
+
+ }
++
++ uids_mutex_unlock();
++
+ return up;
+ }
+
+ void switch_uid(struct user_struct *new_user)
+ {
+@@ -182,10 +408,11 @@ void switch_uid(struct user_struct *new_
+ old_user = current->user;
+ atomic_inc(&new_user->processes);
+ atomic_dec(&old_user->processes);
+ switch_uid_keyring(new_user);
+ current->user = new_user;
++ sched_switch_user(current);
+
+ /*
+ * We need to synchronize with __sigqueue_alloc()
+ * doing a get_uid(p->user).. If that saw the old
+ * user value, we need to wait until it has exited
+--- linux-2.6.23.orig/mm/memory_hotplug.c
++++ linux-2.6.23/mm/memory_hotplug.c
+@@ -215,10 +215,14 @@ int online_pages(unsigned long pfn, unsi
+ }
+ zone->present_pages += onlined_pages;
+ zone->zone_pgdat->node_present_pages += onlined_pages;
+
+ setup_per_zone_pages_min();
++ if (onlined_pages) {
++ kswapd_run(zone_to_nid(zone));
++ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++ }
+
+ if (need_zonelists_rebuild)
+ build_all_zonelists();
+ vm_total_pages = nr_free_pagecache_pages();
+ writeback_set_ratelimit();
+@@ -269,13 +273,10 @@ int add_memory(int nid, u64 start, u64 s
+ if (!node_online(nid)) {
+ pgdat = hotadd_new_pgdat(nid, start);
+ if (!pgdat)
+ return -ENOMEM;
+ new_pgdat = 1;
+- ret = kswapd_run(nid);
+- if (ret)
+- goto error;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, start, size);
+
+--- linux-2.6.23.orig/mm/page_alloc.c
++++ linux-2.6.23/mm/page_alloc.c
+@@ -45,17 +45,25 @@
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+ #include "internal.h"
+
+ /*
+- * MCD - HACK: Find somewhere to initialize this EARLY, or make this
+- * initializer cleaner
++ * Array of node states.
+ */
+-nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
+-EXPORT_SYMBOL(node_online_map);
+-nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
+-EXPORT_SYMBOL(node_possible_map);
++nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
++ [N_POSSIBLE] = NODE_MASK_ALL,
++ [N_ONLINE] = { { [0] = 1UL } },
++#ifndef CONFIG_NUMA
++ [N_NORMAL_MEMORY] = { { [0] = 1UL } },
++#ifdef CONFIG_HIGHMEM
++ [N_HIGH_MEMORY] = { { [0] = 1UL } },
++#endif
++ [N_CPU] = { { [0] = 1UL } },
++#endif /* NUMA */
++};
++EXPORT_SYMBOL(node_states);
++
+ unsigned long totalram_pages __read_mostly;
+ unsigned long totalreserve_pages __read_mostly;
+ long nr_swap_pages;
+ int percpu_pagelist_fraction;
+
+@@ -2070,18 +2078,39 @@ static void build_zonelist_cache(pg_data
+ pgdat->node_zonelists[i].zlcache_ptr = NULL;
+ }
+
+ #endif /* CONFIG_NUMA */
+
++/* Any regular memory on that node ? */
++static void check_for_regular_memory(pg_data_t *pgdat)
++{
++#ifdef CONFIG_HIGHMEM
++ enum zone_type zone_type;
++
++ for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
++ struct zone *zone = &pgdat->node_zones[zone_type];
++ if (zone->present_pages)
++ node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
++ }
++#endif
++}
++
+ /* return values int ....just for stop_machine_run() */
+ static int __build_all_zonelists(void *dummy)
+ {
+ int nid;
+
+ for_each_online_node(nid) {
+- build_zonelists(NODE_DATA(nid));
+- build_zonelist_cache(NODE_DATA(nid));
++ pg_data_t *pgdat = NODE_DATA(nid);
++
++ build_zonelists(pgdat);
++ build_zonelist_cache(pgdat);
++
++ /* Any memory on that node */
++ if (pgdat->node_present_pages)
++ node_set_state(nid, N_HIGH_MEMORY);
++ check_for_regular_memory(pgdat);
+ }
+ return 0;
+ }
+
+ void build_all_zonelists(void)
+@@ -2322,18 +2351,21 @@ static struct per_cpu_pageset boot_pages
+ * per cpu pageset array in struct zone.
+ */
+ static int __cpuinit process_zones(int cpu)
+ {
+ struct zone *zone, *dzone;
++ int node = cpu_to_node(cpu);
++
++ node_set_state(node, N_CPU); /* this node has a cpu */
+
+ for_each_zone(zone) {
+
+ if (!populated_zone(zone))
+ continue;
+
+ zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
+- GFP_KERNEL, cpu_to_node(cpu));
++ GFP_KERNEL, node);
+ if (!zone_pcp(zone, cpu))
+ goto bad;
+
+ setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+
+--- linux-2.6.23.orig/mm/vmscan.c
++++ linux-2.6.23/mm/vmscan.c
+@@ -1845,11 +1845,10 @@ static int __zone_reclaim(struct zone *z
+ return nr_reclaimed >= nr_pages;
+ }
+
+ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+ {
+- cpumask_t mask;
+ int node_id;
+
+ /*
+ * Zone reclaim reclaims unmapped file backed pages and
+ * slab pages if we are over the defined limits.
+@@ -1882,11 +1881,10 @@ int zone_reclaim(struct zone *zone, gfp_
+ * have associated processors. This will favor the local processor
+ * over remote processors and spread off node memory allocations
+ * as wide as possible.
+ */
+ node_id = zone_to_nid(zone);
+- mask = node_to_cpumask(node_id);
+- if (!cpus_empty(mask) && node_id != numa_node_id())
++ if (node_state(node_id, N_CPU) && node_id != numa_node_id())
+ return 0;
+ return __zone_reclaim(zone, gfp_mask, order);
+ }
+ #endif
+--- linux-2.6.23.orig/net/unix/af_unix.c
++++ linux-2.6.23/net/unix/af_unix.c
+@@ -331,11 +331,11 @@ static inline int unix_writable(struct s
+ static void unix_write_space(struct sock *sk)
+ {
+ read_lock(&sk->sk_callback_lock);
+ if (unix_writable(sk)) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+- wake_up_interruptible(sk->sk_sleep);
++ wake_up_interruptible_sync(sk->sk_sleep);
+ sk_wake_async(sk, 2, POLL_OUT);
+ }
+ read_unlock(&sk->sk_callback_lock);
+ }
+
+@@ -1640,11 +1640,11 @@ static int unix_dgram_recvmsg(struct kio
+ err = 0;
+ unix_state_unlock(sk);
+ goto out_unlock;
+ }
+
+- wake_up_interruptible(&u->peer_wait);
++ wake_up_interruptible_sync(&u->peer_wait);
+
+ if (msg->msg_name)
+ unix_copy_addr(msg, skb->sk);
+
+ if (size > skb->len)
diff --git a/recipes/linux/linux-2.6.24/time.h.patch b/recipes/linux/linux-2.6.23/time.h.patch
index fd22f3a01d..fd22f3a01d 100644
--- a/recipes/linux/linux-2.6.24/time.h.patch
+++ b/recipes/linux/linux-2.6.23/time.h.patch
diff --git a/recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig b/recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig
index 77cb9fafe5..77cb9fafe5 100644
--- a/recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig
+++ b/recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig
diff --git a/recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig b/recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig
index c7171cd4ae..c7171cd4ae 100644
--- a/recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig
+++ b/recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig
diff --git a/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch b/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch
deleted file mode 100644
index 6b8aaf4445..0000000000
--- a/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From a4cba996cb77da4afc26c35402a70c3f008afe96 Mon Sep 17 00:00:00 2001
-From: Michel Pollet <buserror@gmail.com>
-Date: Sat, 14 Mar 2009 10:34:32 +0000
-Subject: [PATCH] 920T: Temp(?) fix for the 40 relocation binutils problem
-
-This prevents the modules failing to load when made
-with modern toolchains. There is no way to prevent binutils
-to generate these relocations, and on the 920t they are
-in fact not needed. So this patch just skip them.
-
-Signed-off-by: Michel Pollet <buserror@gmail.com>
----
- arch/arm/include/asm/elf.h | 1 +
- arch/arm/kernel/module.c | 7 +++++++
- 2 files changed, 8 insertions(+), 0 deletions(-)
-
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index a58378c..ce3b36e 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -50,6 +50,7 @@ typedef struct user_fp elf_fpregset_t;
- #define R_ARM_ABS32 2
- #define R_ARM_CALL 28
- #define R_ARM_JUMP24 29
-+#define R_ARM_V4BX 40
-
- /*
- * These are used to set parameters in the core dumps.
-diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index dab48f2..fa03392 100644
---- a/arch/arm/kernel/module.c
-+++ b/arch/arm/kernel/module.c
-@@ -132,6 +132,13 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- *(u32 *)loc |= offset & 0x00ffffff;
- break;
-
-+#ifdef CONFIG_CPU_ARM920T
-+ /* modern toolchain generate V4BX for the modules, and there is no
-+ * way to skip them being generated in the .ko, so in our case, we just
-+ * can ignore them */
-+ case R_ARM_V4BX: /* Ignore these sections */
-+ break;
-+#endif
- default:
- printk(KERN_ERR "%s: unknown relocation: %u\n",
- module->name, ELF32_R_TYPE(rel->r_info));
---
-1.5.6.3
-
diff --git a/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch b/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch
deleted file mode 100644
index bfad6d80eb..0000000000
--- a/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 11c0b33d2a046a37bcd96528faa0e93359ef4a4b Mon Sep 17 00:00:00 2001
-From: Herbert Xu <herbert@gondor.apana.org.au>
-Date: Thu, 26 Mar 2009 00:59:10 -0700
-Subject: [PATCH] GRO: Disable GRO on legacy netif_rx path
-
-When I fixed the GRO crash in the legacy receive path I used
-napi_complete to replace __napi_complete. Unfortunately they're
-not the same when NETPOLL is enabled, which may result in us
-not calling __napi_complete at all.
-
-What's more, we really do need to keep the __napi_complete call
-within the IRQ-off section since in theory an IRQ can occur in
-between and fill up the backlog to the maximum, causing us to
-lock up.
-
-Since we can't seem to find a fix that works properly right now,
-this patch reverts all the GRO support from the netif_rx path.
-
-Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Michel Pollet <buserror@gmail.com>
----
- net/core/dev.c | 9 +++------
- 1 files changed, 3 insertions(+), 6 deletions(-)
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index e3fe5c7..e438f54 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -2588,18 +2588,15 @@ static int process_backlog(struct napi_struct *napi, int quota)
- local_irq_disable();
- skb = __skb_dequeue(&queue->input_pkt_queue);
- if (!skb) {
-+ __napi_complete(napi);
- local_irq_enable();
-- napi_complete(napi);
-- goto out;
-+ break;
- }
- local_irq_enable();
-
-- napi_gro_receive(napi, skb);
-+ netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
-
-- napi_gro_flush(napi);
--
--out:
- return work;
- }
-
---
-1.5.6.3
-
diff --git a/recipes/linux/linux_2.6.18.bb b/recipes/linux/linux_2.6.18.bb
index b445325f2d..df7641565a 100644
--- a/recipes/linux/linux_2.6.18.bb
+++ b/recipes/linux/linux_2.6.18.bb
@@ -6,11 +6,12 @@ require linux.inc
DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_avr32 = "1"
-PR = "r0"
+PR = "r1"
PARALLEL_MAKE=""
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.20.bb b/recipes/linux/linux_2.6.20.bb
index d8427bfc6f..d46c623e6b 100644
--- a/recipes/linux/linux_2.6.20.bb
+++ b/recipes/linux/linux_2.6.20.bb
@@ -9,6 +9,8 @@ DEFAULT_PREFERENCE_nhk15 = "1"
PR = "r10"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.21.bz2;patch=1 \
+ file://0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch;patch=1 \
file://defconfig"
SRC_URI_append_n2100 = "\
diff --git a/recipes/linux/linux_2.6.21+2.6.22-rc1.bb b/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
index 35b2efe74a..563820fec7 100644
--- a/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
+++ b/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
@@ -9,7 +9,7 @@ KERNEL_RELEASE = "2.6.22-rc1"
PR = "r2"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.21.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.22-rc1.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.22/patch-2.6.22-rc1.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.21.bb b/recipes/linux/linux_2.6.21.bb
index c4c7297f1c..bbac0c2ae9 100644
--- a/recipes/linux/linux_2.6.21.bb
+++ b/recipes/linux/linux_2.6.21.bb
@@ -5,9 +5,10 @@ DEFAULT_PREFERENCE_at91sam9263ek = "-1"
DEFAULT_PREFERENCE_gumstix-connex = "1"
DEFAULT_PREFERENCE_gumstix-verdex = "1"
-PR = "r12"
+PR = "r13"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.7.bz2;patch=1 \
file://tsc2003.c \
file://tsc2003-config.diff;patch=1 \
file://defconfig \
diff --git a/recipes/linux/linux_2.6.22+2.6.23-rc3.bb b/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
index 47c3564087..e52800a8f4 100644
--- a/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
+++ b/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
@@ -10,7 +10,7 @@ KERNEL_RELEASE = "2.6.23-rc3"
PR = "r1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${BASE_KERNEL_VERSION}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_VERSION}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.23/patch-${KERNEL_VERSION}.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.22+2.6.23-rc5.bb b/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
index 3165c39a11..e21855269f 100644
--- a/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
+++ b/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
@@ -10,7 +10,7 @@ KERNEL_VERSION = "2.6.23-rc5"
KERNEL_RELEASE = "2.6.23-rc5"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${BASE_KERNEL_VERSION}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_VERSION}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.23/patch-${KERNEL_VERSION}.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.22.6.bb b/recipes/linux/linux_2.6.22.6.bb
deleted file mode 100644
index 69b988e332..0000000000
--- a/recipes/linux/linux_2.6.22.6.bb
+++ /dev/null
@@ -1,31 +0,0 @@
-require linux.inc
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_ts72xx = "1"
-DEFAULT_PREFERENCE_mx31moboard = "1"
-
-PR = "r1"
-
-SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.22.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.bz2;patch=1 \
- file://defconfig \
- "
-
-SRC_URI_append_ts72xx = "\
- file://ep93xx-gpio-interrupt-debounce.diff;patch=1 \
- file://ep93xx-i2c-bus.diff;patch=1 \
- file://ep93xx-i2c.diff;patch=1 \
- file://ep93xx-leds.diff;patch=1 \
- file://ep93xx-serial-uartbaud.diff;patch=1 \
- file://ep93xx-serial-clocks.diff;patch=1 \
- file://ep93xx-timer-accuracy.diff;patch=1 \
- file://ep93xx-maverick-uniqid.patch;patch=1 \
- file://ts72xx-nfbit-fix.patch;patch=1 \
- file://ts72xx-machine-id-fix.patch;patch=1 \
- file://ts72xx-watchdog.patch;patch=1 \
- file://ts72xx-use-cpld-reset.patch;patch=1 \
- "
-
-SRC_URI_append_mx31moboard = "http://mobots.epfl.ch/mx31moboard/linux-2.6.22-moboard.patch.bz2;patch=1"
-
-S = "${WORKDIR}/linux-2.6.22"
diff --git a/recipes/linux/linux_2.6.22.bb b/recipes/linux/linux_2.6.22.bb
index bb8749f0d6..1e73139c5e 100644
--- a/recipes/linux/linux_2.6.22.bb
+++ b/recipes/linux/linux_2.6.22.bb
@@ -4,10 +4,12 @@ require linux.inc
DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_cm-x270 = "-1"
DEFAULT_PREFERENCE_bd-neon = "0"
+DEFAULT_PREFERENCE_mx31moboard = "1"
-PR = "r5"
+PR = "r6"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.22.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.19.bz2;patch=1 \
file://defconfig \
"
@@ -21,6 +23,8 @@ SRC_URI_append_cm-x270 = "\
file://0007-mmcsd_large_cards-r0.patch;patch=1 \
file://0008-cm-x270-nand-simplify-name.patch;patch=1"
+SRC_URI_append_mx31moboard = "http://mobots.epfl.ch/mx31moboard/linux-2.6.22-moboard.patch.bz2;patch=1"
+
CMDLINE_cm-x270 = "console=${CMX270_CONSOLE_SERIAL_PORT},38400 monitor=8 bpp=16 mem=64M mtdparts=physmap-flash.0:256k(boot)ro,0x180000(kernel),-(root);cm-x270-nand:64m(app),-(data) rdinit=/sbin/init root=mtd3 rootfstype=jffs2"
FILES_kernel-image_cm-x270 = ""
diff --git a/recipes/linux/linux_2.6.23+2.6.24-rc5.bb b/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
index d22d5e8f27..32a3ac0a70 100644
--- a/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
+++ b/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE = "-1"
PR = "r3"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.24-rc5.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.24/patch-2.6.24-rc5.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.23+2.6.24-rc6.bb b/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
index 73100b62c3..035e6054f6 100644
--- a/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
+++ b/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE = "-1"
PR = "r0"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.24-rc6.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.24/patch-2.6.24-rc6.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.23.bb b/recipes/linux/linux_2.6.23.bb
index 0f62a1c37c..a5367fda09 100644
--- a/recipes/linux/linux_2.6.23.bb
+++ b/recipes/linux/linux_2.6.23.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE_mpc8313e-rdb = "1"
DEFAULT_PREFERENCE_mpc8323e-rdb = "1"
DEFAULT_PREFERENCE_avr32 = "1"
-PR = "r12"
+PR = "r13"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
file://binutils-buildid-arm.patch;patch=1 \
@@ -15,22 +15,23 @@ SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
"
# Bug fixes on the 2.6.23.x stable branch
-SRC_URI += "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.23.12.bz2;patch=1"
+SRC_URI += "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.23.17.bz2;patch=1"
# Real-time preemption (includes CFS). This is experimental and requires a different defconfig.
#SRC_URI += "file://patch-2.6.23.12-rt14;patch=1"
-# Only the Completely Fair Scheduler (CFS), the official backport from 2.6.24
-SRC_URI += "http://people.redhat.com/mingo/cfs-scheduler/sched-cfs-v2.6.23.12-v24.1.patch;patch=1"
+# Only the Completely Fair Scheduler (CFS), the official backport from 2.6.24 (adapted for 2.6.23.17)
+SRC_URI += "file://sched-cfs-v2.6.23.12-v24.1.patch;patch=1"
# Add support for squashfs-lzma (a highly compressed read-only filesystem)
SRC_URI += "http://kamikaze.waninkoko.info/patches/2.6.23/klight1/broken-out/squashfs-lzma-2.6.23.patch;patch=1"
+SRC_URI += "file://time.h.patch;patch=1"
+
# The Atmel patch doesn't apply against 2.6.23.12 :(
SRC_URI_avr32 = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
file://defconfig \
http://avr32linux.org/twiki/pub/Main/LinuxPatches/linux-2.6.23.atmel.3.patch.bz2;patch=1 \
"
SRC_URI_append_em-x270 = "\
- file://em-x270.patch;patch=1 \
- file://01-prevent_loop_timespec_add_ns.patch;patch=1"
+ file://em-x270.patch;patch=1 "
SRC_URI_append_cm-x270 = "\
file://0001-cm-x270-base2.patch;patch=1 \
@@ -90,5 +91,5 @@ python do_compulab_image() {
}
-addtask compulab_image after do_deploy before do_package
+addtask compulab_image after do_package before do_build
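For the task reordering just above: in BitBake, "addtask <task> after <a> before <b>" inserts the task into the dependency graph so it runs only once <a> has completed and must itself finish before <b> is considered done. Moving do_compulab_image from "after do_deploy before do_package" to "after do_package before do_build" therefore defers image assembly until packaging has finished while keeping it inside the default do_build target. A sketch of the resulting ordering (the task body is elided here, as it is in the hunk above):

    python do_compulab_image() {
        # assemble the CompuLab flash image from the packaged output
        # (actual body omitted in the diff above)
    }
    # run after packaging, but before the default build target completes
    addtask compulab_image after do_package before do_build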
diff --git a/recipes/linux/linux_2.6.24.bb b/recipes/linux/linux_2.6.24.bb
index cb4389a702..c4549795d6 100644
--- a/recipes/linux/linux_2.6.24.bb
+++ b/recipes/linux/linux_2.6.24.bb
@@ -13,12 +13,12 @@ DEFAULT_PREFERENCE_hipox = "1"
DEFAULT_PREFERENCE_cs-e9302 = "1"
DEFAULT_PREFERENCE_smartq5 = "1"
-PR = "r33"
+PR = "r34"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.7.bz2;patch=1 \
file://squashfs-lzma-2.6.24.patch;patch=1 \
file://ubifs-v2.6.24.patch;patch=1 \
- file://time.h.patch;patch=1 \
file://defconfig"
# Moved away temporarely until committed properly (work in progress).
diff --git a/recipes/linux/linux_2.6.25.20.bb b/recipes/linux/linux_2.6.25.20.bb
deleted file mode 100644
index 6440e0650b..0000000000
--- a/recipes/linux/linux_2.6.25.20.bb
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################
-#@MAINTAINER: Marco Cavallini <m.cavallini@koansoftware.com>
-# linux_2.6.25.20.bb
-# recipe file for PM9261 and PM9263
-###########################################
-
-require linux.inc
-
-PR = "r2"
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_ronetix-pm9263 = "1"
-DEFAULT_PREFERENCE_ronetix-pm9261 = "1"
-
-SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.25.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.25.20.bz2;patch=1 \
- file://defconfig"
-
-# WARNING: for following patched is required the proper entry in conf/checksums.ini
-
-SRC_URI_append_ronetix-pm9263 = " \
- http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
- "
-
-SRC_URI_append_ronetix-pm9261 = " \
- http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
- "
-
-S = "${WORKDIR}/linux-2.6.25/"
diff --git a/recipes/linux/linux_2.6.25.bb b/recipes/linux/linux_2.6.25.bb
index b6ed3d82e2..a89374a652 100644
--- a/recipes/linux/linux_2.6.25.bb
+++ b/recipes/linux/linux_2.6.25.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r6"
+PR = "r8"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -13,8 +13,11 @@ DEFAULT_PREFERENCE_alix = "1"
DEFAULT_PREFERENCE_at32stk1000 = "1"
DEFAULT_PREFERENCE_at91-l9260 = "1"
DEFAULT_PREFERENCE_m8050 = "1"
+DEFAULT_PREFERENCE_ronetix-pm9263 = "1"
+DEFAULT_PREFERENCE_ronetix-pm9261 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.25.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.20.bz2;patch=1 \
file://defconfig"
SRC_URI_append_mpc8313e-rdb = "\
@@ -41,6 +44,18 @@ SRC_URI_append_at91-l9260 = " \
http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
"
+SRC_URI_append_ronetix-pm9263 = " \
+ http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
+"
+
+SRC_URI_append_ronetix-pm9261 = " \
+ http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
+"
+
SRC_URI_append_m8050 = " file://m8050.diff;patch=1 file://update-mach-types.diff;patch=1"
CMDLINE_cm-x270 = "console=${CMX270_CONSOLE_SERIAL_PORT},38400 monitor=1 mem=64M mtdparts=physmap-flash.0:256k(boot)ro,0x180000(kernel),-(root);cm-x270-nand:64m(app),-(data) rdinit=/sbin/init root=mtd3 rootfstype=jffs2"
diff --git a/recipes/linux/linux_2.6.26.bb b/recipes/linux/linux_2.6.26.bb
index 53d55577b0..775d2d8ba1 100644
--- a/recipes/linux/linux_2.6.26.bb
+++ b/recipes/linux/linux_2.6.26.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r9"
+PR = "r10"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -11,6 +11,7 @@ DEFAULT_PREFERENCE_topas910 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.26.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig"
SRC_URI_append_boc01 = "\
@@ -36,8 +37,7 @@ SRC_URI_append_mpc8313e-rdb = "\
file://mpc8313e-rdb-eth-fixed.patch;patch=1 \
"
-SRC_URI_append_topas910 = " ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.26.5.bz2;patch=1 \
- http://www.bplan-gmbh.org/data/toshiba/topas/linux/2.6.26.5/patch_2.6.26.5_topas910.bz2;patch=1"
+SRC_URI_append_topas910 = "http://www.bplan-gmbh.org/data/toshiba/topas/linux/2.6.26.5/patch_2.6.26.5_topas910.bz2;patch=1"
# see http://bugzilla.kernel.org/show_bug.cgi?id=11143
do_stage_append() {
diff --git a/recipes/linux/linux_2.6.27.bb b/recipes/linux/linux_2.6.27.bb
index 0b873a65a7..9e8d6a32e0 100644
--- a/recipes/linux/linux_2.6.27.bb
+++ b/recipes/linux/linux_2.6.27.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r8"
+PR = "r11"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -9,6 +9,7 @@ DEFAULT_PREFERENCE_progear = "1"
DEFAULT_PREFERENCE_simpad = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.41.bz2;patch=1 \
file://defconfig "
SRC_URI_append_boc01 = "\
diff --git a/recipes/linux/linux_2.6.28.bb b/recipes/linux/linux_2.6.28.bb
index 98cf19129d..05fe815d98 100644
--- a/recipes/linux/linux_2.6.28.bb
+++ b/recipes/linux/linux_2.6.28.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r12"
+PR = "r13"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -14,6 +14,7 @@ DEFAULT_PREFERENCE_wrap = "1"
DEFAULT_PREFERENCE_tx27 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.28.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.10.bz2;patch=1 \
file://defconfig"
SRC_URI_append_at91sam9263ek = " \
diff --git a/recipes/linux/linux_2.6.29+2.6.30-rc5.bb b/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
index 2a38a546e0..08cb2fe3c5 100644
--- a/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
+++ b/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
@@ -19,6 +19,6 @@ DEFAULT_PREFERENCE_spitz = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${OLD_KERNEL_RELEASE}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_RELEASE}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.30/patch-${KERNEL_RELEASE}.bz2;patch=1 \
file://defconfig"
diff --git a/recipes/linux/linux_2.6.29.bb b/recipes/linux/linux_2.6.29.bb
index f78193d711..240faf224d 100644
--- a/recipes/linux/linux_2.6.29.bb
+++ b/recipes/linux/linux_2.6.29.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r9"
+PR = "r10"
S = "${WORKDIR}/linux-2.6.29"
@@ -19,6 +19,7 @@ DEFAULT_PREFERENCE_tqm8540 = "1"
DEFAULT_PREFERENCE_stamp9g20evb = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.29.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.6.bz2;patch=1 \
file://defconfig"
SRC_URI_append_boc01 = "\
@@ -42,14 +43,12 @@ SRC_URI_append_micro2440 = " \
file://0002-S3C-Backported-openmoko-s-touchscreen-filters.patch;patch=1 \
file://0003-VENDOR-armworks-logo.patch;patch=1 \
file://0004-920T-Use-specific-920t-mtune.patch;patch=1 \
- file://0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch;patch=1 \
file://0006-S3C-Allow-the-machine-code-to-get-the-BBT-table-fro.patch;patch=1 \
file://0007-MINI2440-Add-machine-support.patch;patch=1 \
file://0008-MINI2440-Delays-command-check-response-on-SD.patch;patch=1 \
file://0009-MINI2440-Rename-the-SoC-tty-names.patch;patch=1 \
file://0010-MINI2440-creates-a-mini2440_defconfig-file.patch;patch=1 \
file://0011-MINI2440-Add-touchscreen-support.patch;patch=1 \
- file://0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch;patch=1 \
"
SRC_URI_append_tosa = " \
diff --git a/recipes/linux/linux_2.6.30.bb b/recipes/linux/linux_2.6.30.bb
index c30caa4b36..19247e2e5b 100644
--- a/recipes/linux/linux_2.6.30.bb
+++ b/recipes/linux/linux_2.6.30.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r4"
+PR = "r5"
S = "${WORKDIR}/linux-${PV}"
@@ -16,7 +16,7 @@ DEFAULT_PREFERENCE_at91sam9263ek = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.9.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.10.bz2;patch=1 \
http://maxim.org.za/AT91RM9200/2.6/2.6.30-at91.patch.gz;patch=1 \
file://aufs2-30.patch;patch=1 \
file://defconfig"
diff --git a/recipes/linux/linux_2.6.31.bb b/recipes/linux/linux_2.6.31.bb
index 839e808451..dcce779ebd 100644
--- a/recipes/linux/linux_2.6.31.bb
+++ b/recipes/linux/linux_2.6.31.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r6"
+PR = "r8"
S = "${WORKDIR}/linux-${PV}"
@@ -16,7 +16,7 @@ DEFAULT_PREFERENCE_ben-nanonote = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.5.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig"
SRC_URI += "file://0001-Squashfs-move-zlib-decompression-wrapper-code-into.patch;patch=1 \
diff --git a/recipes/linux/linux_2.6.32.bb b/recipes/linux/linux_2.6.32.bb
index 99db01bf8b..be3d8ad7c6 100644
--- a/recipes/linux/linux_2.6.32.bb
+++ b/recipes/linux/linux_2.6.32.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r1"
+PR = "r2"
S = "${WORKDIR}/linux-${PV}"
@@ -15,6 +15,7 @@ DEFAULT_PREFERENCE_spitz = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2;name=kernel \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.1.bz2;patch=1 \
file://defconfig"
SRC_URI[kernel.md5sum] = "260551284ac224c3a43c4adac7df4879"
diff --git a/recipes/mesa/mesa-common-old.inc b/recipes/mesa/mesa-common-old.inc
index 402a82a6b3..a88a07f019 100644
--- a/recipes/mesa/mesa-common-old.inc
+++ b/recipes/mesa/mesa-common-old.inc
@@ -3,8 +3,13 @@ PE = "2"
SRC_URI = "${SOURCEFORGE_MIRROR}/mesa3d/MesaLib-${PV}.tar.bz2 \
file://mklib-rpath-link.patch;patch=1"
+
S = "${WORKDIR}/Mesa-${PV}"
+PROVIDES = "virtual/libgl"
+
LICENSE = "LGPL"
+INC_PR = "r3"
+
DEPENDS = "makedepend-native xf86vidmodeproto glproto virtual/libx11 libxext libxxf86vm libxi libxmu libice"
diff --git a/recipes/mesa/mesa-dri_6.5.2.bb b/recipes/mesa/mesa-dri_6.5.2.bb
index f94ea8b3d2..594848fd8b 100644
--- a/recipes/mesa/mesa-dri_6.5.2.bb
+++ b/recipes/mesa/mesa-dri_6.5.2.bb
@@ -6,7 +6,7 @@ FILESPATH = "${FILE_DIRNAME}/mesa-${PV}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
PACKAGES_DYNAMIC = "mesa-dri-driver-*"
-PR = "r2"
+PR = "${INC_PR}.0"
DEPENDS += "libdrm"
# DRI is useless without the kernel drivers
diff --git a/recipes/mesa/mesa-dri_7.0.3.bb b/recipes/mesa/mesa-dri_7.0.3.bb
index 330efb23af..0cb2fe2cce 100644
--- a/recipes/mesa/mesa-dri_7.0.3.bb
+++ b/recipes/mesa/mesa-dri_7.0.3.bb
@@ -7,7 +7,7 @@ FILESPATH = "${FILE_DIRNAME}/mesa-${PV}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
PACKAGES_DYNAMIC = "mesa-dri-driver-*"
-PR = "r1"
+PR = "${INC_PR}.0"
DEPENDS += "libdrm"
# DRI is useless without the kernel drivers
diff --git a/recipes/mesa/mesa-xlib_7.2.bb b/recipes/mesa/mesa-xlib_7.2.bb
index f6c44ebdb0..1e752dce6f 100644
--- a/recipes/mesa/mesa-xlib_7.2.bb
+++ b/recipes/mesa/mesa-xlib_7.2.bb
@@ -5,7 +5,8 @@ LIB_DEPS = "virtual/libx11 libxext libxxf86vm libxdamage libxfixes"
DEPENDS = "${PROTO_DEPS} ${LIB_DEPS}"
-PR = "r4"
+PE = "1"
+PR = "${INC_PR}.0"
EXTRA_OECONF += "--with-driver=xlib"
diff --git a/recipes/mesa/mesa-xlib_7.4.bb b/recipes/mesa/mesa-xlib_7.4.bb
new file mode 100644
index 0000000000..1e752dce6f
--- /dev/null
+++ b/recipes/mesa/mesa-xlib_7.4.bb
@@ -0,0 +1,20 @@
+include mesa-common.inc
+
+PROTO_DEPS = "xf86driproto glproto"
+LIB_DEPS = "virtual/libx11 libxext libxxf86vm libxdamage libxfixes"
+
+DEPENDS = "${PROTO_DEPS} ${LIB_DEPS}"
+
+PE = "1"
+PR = "${INC_PR}.0"
+
+EXTRA_OECONF += "--with-driver=xlib"
+
+PACKAGES =+ "${PN}-xprogs"
+
+FILES_${PN}-xprogs = "${bindir}/glxdemo ${bindir}/glxgears ${bindir}/glxheads ${bindir}/glxinfo"
+
+do_install_append () {
+ install -d ${D}/${bindir}
+ install -m 0755 ${S}/progs/xdemos/{glxdemo,glxgears,glxheads,glxinfo} ${D}/${bindir}
+}
diff --git a/recipes/mesa/mesa_6.5.2.bb b/recipes/mesa/mesa_6.5.2.bb
index 79993cc1f1..66078e1a7b 100644
--- a/recipes/mesa/mesa_6.5.2.bb
+++ b/recipes/mesa/mesa_6.5.2.bb
@@ -1,7 +1,7 @@
-PR = "r1"
-
include mesa-mesa.inc
+PR = "${INC_PR}.0"
+
SRC_URI_append = " file://fix-host-compile.patch;patch=1 "
diff --git a/recipes/mesa/mesa_7.0.2.bb b/recipes/mesa/mesa_7.0.2.bb
index 3eb1fbef8b..66078e1a7b 100644
--- a/recipes/mesa/mesa_7.0.2.bb
+++ b/recipes/mesa/mesa_7.0.2.bb
@@ -1,5 +1,7 @@
include mesa-mesa.inc
+PR = "${INC_PR}.0"
+
SRC_URI_append = " file://fix-host-compile.patch;patch=1 "
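The mesa recipes above are moved onto the shared-include revision scheme: the common include sets INC_PR, and each recipe derives its PR from it. Bumping INC_PR in the .inc then re-revisions every recipe that includes it, while a recipe-local fix only needs the numeric suffix bumped. A sketch of the convention as used in the files above (the r3 value is the one introduced in mesa-common-old.inc):

    # mesa-common-old.inc -- shared by the mesa recipes
    INC_PR = "r3"

    # mesa-dri_6.5.2.bb, mesa-xlib_7.2.bb, mesa-xlib_7.4.bb, ...
    PR = "${INC_PR}.0"   # bump the ".0" for recipe-local changes,
                         # bump INC_PR for changes to the common include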
diff --git a/recipes/mpc/mpc-native_0.8.1.bb b/recipes/mpc/mpc-native_0.8.1.bb
new file mode 100644
index 0000000000..8dba0b7fc4
--- /dev/null
+++ b/recipes/mpc/mpc-native_0.8.1.bb
@@ -0,0 +1,3 @@
+require mpc_${PV}.bb
+inherit native
+DEPENDS = "mpfr-native gmp-native"
diff --git a/recipes/mpc/mpc.inc b/recipes/mpc/mpc.inc
new file mode 100644
index 0000000000..6e18e588be
--- /dev/null
+++ b/recipes/mpc/mpc.inc
@@ -0,0 +1,6 @@
+DESCRIPTION = "Mpc is a C library for the arithmetic of complex numbers with arbitrarily high precision and correct rounding of the result. It is built upon and follows the same principles as Mpfr"
+HOMEPAGE = "http://www.multiprecision.org/"
+LICENSE = "LGPLv2"
+SECTION = "libs"
+
+inherit autotools
diff --git a/recipes/mpc/mpc_0.8.1.bb b/recipes/mpc/mpc_0.8.1.bb
new file mode 100644
index 0000000000..b62c08673e
--- /dev/null
+++ b/recipes/mpc/mpc_0.8.1.bb
@@ -0,0 +1,10 @@
+require mpc.inc
+
+DEPENDS = "gmp mpfr"
+
+SRC_URI = "http://www.multiprecision.org/mpc/download/mpc-${PV}.tar.gz"
+S = "${WORKDIR}/mpc-${PV}"
+
+do_stage() {
+ autotools_stage_all
+}
diff --git a/recipes/networkmanager/networkmanager_0.7.1.997.bb b/recipes/networkmanager/networkmanager_0.7.1.997.bb
index fe90d7ed71..8b89f9776c 100644
--- a/recipes/networkmanager/networkmanager_0.7.1.997.bb
+++ b/recipes/networkmanager/networkmanager_0.7.1.997.bb
@@ -1,5 +1,7 @@
require networkmanager-0.7.inc
+PR = "r1"
+
SRC_URI += " \
file://remove-gtk-doc-make.patch;patch=1 \
file://nm-system-settings.conf \
@@ -22,5 +24,9 @@ do_install_append () {
install -d ${D}/etc/NetworkManager/
install -m 0644 ${WORKDIR}/nm-system-settings.conf ${D}/etc/NetworkManager/
install -m 0755 ${WORKDIR}/NetworkManager ${D}/etc/init.d
+
+ # Install an empty VPN folder as nm-connection-editor will happily segfault without it :o.
+ # With or without VPN support built in ;).
+ install -d ${D}/etc/NetworkManager/VPN
}
diff --git a/recipes/openmoko-3rdparty/guitartune_svn.bb b/recipes/openmoko-3rdparty/guitartune_svn.bb
new file mode 100644
index 0000000000..37fbd5ffa3
--- /dev/null
+++ b/recipes/openmoko-3rdparty/guitartune_svn.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "guitar tuner for openmoko phones"
+HOMEPAGE = "http://code.google.com/p/guitartune"
+AUTHOR = "cchandel"
+LICENSE = "GPLv2"
+SECTION = "e/apps"
+DEPENDS = "gtk+ libglade fftw sqlite3"
+
+PV = "0.36+svnr${SRCPV}"
+
+SRC_URI = "svn://guitartune.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
+
+inherit autotools
+
+do_install_append() {
+ install -d "${D}/${datadir}/pixmaps"
+ install -m 0644 "${S}/resources/guitartune.png" "${D}/${datadir}/pixmaps"
+ install -d "${D}/${datadir}/applications"
+ install -m 0644 "${S}/resources/guitartune.desktop" "${D}/${datadir}/applications"
+ install -d "${D}/${datadir}/guitartune"
+ for ico in "${S}/resources/"*.png; do
+ if [ "$(basename $ico)" != "guitartune.png" ]; then
+ install -m 0644 $ico "${D}/${datadir}/guitartune"
+ fi
+ done
+}
+
+FILES_${PN} += "/usr/share/guitartune/* /usr/share/applications/* /usr/share/pixmaps/*"
diff --git a/recipes/powervr-drivers/libgles-omap3.inc b/recipes/powervr-drivers/libgles-omap3.inc
index ddec910efe..360cb7790e 100644
--- a/recipes/powervr-drivers/libgles-omap3.inc
+++ b/recipes/powervr-drivers/libgles-omap3.inc
@@ -1,6 +1,7 @@
DESCRIPTION = "libGLES for the omap3"
LICENCE = "proprietary-binary"
+COMPATIBLE_MACHINE = "(am3517-evm|beagleboard|cm-t35|igep0020|omap3-pandora|omap3-touchbook|omap3evm|omapzoom|omapzoom2|overo|palmpre)"
RDEPENDS_${PN} += "devmem2"
#HACK! These are binaries, so we can't guarantee that LDFLAGS match :(
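COMPATIBLE_MACHINE is matched as a regular expression against MACHINE, so the new line simply whitelists the boards the proprietary binaries are known to run on. A board missing from the list would have to extend the expression from its own configuration; a minimal sketch, with a made-up machine name:

    # hypothetical: allow the driver on one additional OMAP3 board
    COMPATIBLE_MACHINE_append = "|myomap3board"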
diff --git a/recipes/qt4/qt-4.6.0.inc b/recipes/qt4/qt-4.6.0.inc
index 97ad6cdd74..b29b708d74 100644
--- a/recipes/qt4/qt-4.6.0.inc
+++ b/recipes/qt4/qt-4.6.0.inc
@@ -1,7 +1,5 @@
DEFAULT_PREFERENCE = "-1"
-require qt4-embedded.inc
-
SRC_URI = "ftp://ftp.trolltech.com/qt/source/qt-everywhere-opensource-src-${PV}.tar.gz \
file://0001-cross-compile.patch;patch=1 \
file://0002-fix-resinit-declaration.patch;patch=1 \
diff --git a/recipes/qt4/qt4-embedded-gles_4.6.0.bb b/recipes/qt4/qt4-embedded-gles_4.6.0.bb
index f988f9828b..f819c66212 100644
--- a/recipes/qt4/qt4-embedded-gles_4.6.0.bb
+++ b/recipes/qt4/qt4-embedded-gles_4.6.0.bb
@@ -1,4 +1,4 @@
-
+require qt4-embedded.inc
PR = "${INC_PR}.0"
QT_GLFLAGS = "-opengl es2 -openvg"
diff --git a/recipes/qt4/qt4-embedded_4.6.0.bb b/recipes/qt4/qt4-embedded_4.6.0.bb
index 3bde65014d..58fda71ad1 100644
--- a/recipes/qt4/qt4-embedded_4.6.0.bb
+++ b/recipes/qt4/qt4-embedded_4.6.0.bb
@@ -1,3 +1,4 @@
+require qt4-embedded.inc
PR = "${INC_PR}.0"
diff --git a/recipes/qt4/qt4-tools-sdk_4.6.0.bb b/recipes/qt4/qt4-tools-sdk_4.6.0.bb
new file mode 100644
index 0000000000..b74b8a27e0
--- /dev/null
+++ b/recipes/qt4/qt4-tools-sdk_4.6.0.bb
@@ -0,0 +1,74 @@
+DESCRIPTION = "SDK tools for Qt/[X11|Mac|Embedded] version 4.x"
+DEPENDS = "zlib-native dbus-native"
+SECTION = "libs"
+HOMEPAGE = "http://www.trolltech.com"
+PRIORITY = "optional"
+LICENSE = "LGPLv2.1 GPLv3"
+
+DEFAULT_PREFERENCE = "-1"
+inherit sdk
+
+SRC_URI = "ftp://ftp.trolltech.com/qt/source/qt-everywhere-opensource-src-${PV}.tar.gz \
+ file://configure-lflags.patch;patch=1 \
+ file://qt-config.patch;patch=1 \
+ file://g++.conf \
+ file://linux.conf"
+
+S = "${WORKDIR}/qt-everywhere-opensource-src-${PV}"
+
+
+# FIXME: make it work with "${STAGING_BINDIR_NATIVE}/pkg-config --cflags dbus-1"
+EXTRA_OECONF = "-prefix ${prefix} \
+ -qt-libjpeg -qt-gif -system-zlib \
+ -no-libjpeg -no-libpng \
+ -no-accessibility \
+ -no-cups \
+ -no-exceptions \
+ -no-nas-sound \
+ -no-nis \
+ -verbose -release -fast -static \
+ -qt3support \
+ -I${STAGING_DIR_NATIVE}/usr/include \
+ -I${STAGING_DIR_NATIVE}/usr/include/dbus-1.0 \
+ -I${STAGING_DIR_NATIVE}/usr/lib/dbus-1.0/include"
+
+# yank default -e, otherwise we get the following error:
+# moc_qbuffer.cpp: No such file or directory
+EXTRA_OEMAKE = " "
+
+do_configure() {
+ (echo o; echo yes) | ./configure ${EXTRA_OECONF} || die "Configuring qt failed. EXTRA_OECONF was ${EXTRA_OECONF}"
+}
+
+TOBUILD = "\
+ src/tools/bootstrap \
+ src/tools/moc \
+ src/corelib \
+ src/sql \
+ src/dbus \
+ src/qt3support \
+ src/xml \
+ src/tools/uic \
+ src/tools/rcc \
+ src/network \
+ src/gui \
+ src/tools/uic3 \
+ tools/linguist/lrelease \
+ tools/linguist/lupdate \
+ tools/qdbus \
+"
+
+do_compile() {
+ for i in ${TOBUILD}; do
+ cd ${S}/$i && oe_runmake CC="${CC}" CXX="${CXX}"
+ done
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 bin/qmake ${D}${bindir}/qmake2
+ for i in moc uic uic3 rcc lrelease lupdate qdbuscpp2xml qdbusxml2cpp; do
+ install -m 0755 bin/${i} ${D}${bindir}/${i}4
+ done
+}
+
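The new SDK recipe builds only the host-side tools (see TOBUILD) and installs them under versioned names: qmake becomes qmake2 and the remaining tools gain a "4" suffix (so uic3 ends up as uic34). A hypothetical SDK user would invoke them roughly as follows; the project name is made up:

    # on the SDK host, after installing the qt4-tools-sdk package
    qmake2 myproject.pro
    make
    lrelease4 translations/myproject_de.ts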
diff --git a/recipes/qt4/qt4-x11-free-gles_4.6.0.bb b/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
index 36b6ec5a25..fcb935b8c7 100644
--- a/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
+++ b/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
@@ -1,4 +1,4 @@
-
+require qt4-x11-free.inc
PR = "${INC_PR}.0"
QT_GLFLAGS = "-opengl es2 -openvg"
diff --git a/recipes/qt4/qt4-x11-free_4.6.0.bb b/recipes/qt4/qt4-x11-free_4.6.0.bb
index 1b5b4524c8..80a25303bb 100644
--- a/recipes/qt4/qt4-x11-free_4.6.0.bb
+++ b/recipes/qt4/qt4-x11-free_4.6.0.bb
@@ -1,3 +1,4 @@
+require qt4-x11-free.inc
PR = "${INC_PR}.0"
require qt-4.6.0.inc
diff --git a/recipes/qt4/qt4.inc b/recipes/qt4/qt4.inc
index 7d600a825c..62fcec11a2 100644
--- a/recipes/qt4/qt4.inc
+++ b/recipes/qt4/qt4.inc
@@ -71,6 +71,7 @@ python __anonymous () {
}
OTHER_PACKAGES = "\
+ ${QT_BASE_NAME}-tools \
${QT_BASE_NAME}-assistant \
${QT_BASE_NAME}-common \
${QT_BASE_NAME}-dbus \
@@ -95,6 +96,7 @@ RRECOMMENDS_${PN} = "${LIB_PACKAGES} ${OTHER_PACKAGES}"
RRECOMMENDS_${PN}-dev = "${DEV_PACKAGES}"
RRECOMMENDS_${PN}-dbg = "${DBG_PACKAGES}"
+FILES_${QT_BASE_NAME}-tools = "${bindir}/uic* ${bindir}/moc ${bindir}/rcc ${bindir}/qttracereplay"
FILES_${QT_BASE_NAME}-assistant = "${bindir}/*assistant* ${bindir}/qcollectiongenerator ${bindir}/qhelpconverter ${bindir}/qhelpgenerator"
FILES_${QT_BASE_NAME}-assistant-dbg = "${bindir}/.debug/*assistant* ${bindir}/.debug/qcollectiongenerator ${bindir}/.debug/qhelpconverter ${bindir}/.debug/qhelpgenerator"
FILES_${QT_BASE_NAME}-common = "${bindir}/qtconfig"
@@ -243,6 +245,7 @@ do_install() {
-e s#-L${STAGING_LIBDIR}##g \
-e s#-L${libdir}##g \
-e s#'$(OE_QMAKE_LIBS_X11)'#"${OE_QMAKE_LIBS_X11}"#g \
+ -e s#"-Wl,-rpath-link,${S}/lib"##g \
${D}${libdir}/*.la ${D}${libdir}/*.prl ${D}${libdir}/pkgconfig/*.pc
# fix pkgconfig files
@@ -257,58 +260,4 @@ do_install() {
touch ${D}/${libdir}/fonts/fontdir
}
-STAGE_TEMP = "${WORKDIR}/temp-staging"
-do_stage() {
- rm -rf ${STAGE_TEMP}
- mkdir -p ${STAGE_TEMP}
- oe_runmake install INSTALL_ROOT=${STAGE_TEMP}
-
- # fix pkgconfig, libtool and prl files
- sed -i -e s#-L${S}/lib##g \
- -e s#-L${STAGING_LIBDIR}##g \
- -e s#-L${libdir}##g \
- -e s#'$(OE_QMAKE_LIBS_X11)'#"${OE_QMAKE_LIBS_X11}"#g \
- ${STAGE_TEMP}${libdir}/*.la ${STAGE_TEMP}${libdir}/*.prl ${STAGE_TEMP}${libdir}/pkgconfig/*.pc
-
- # fix pkgconfig files
- sed -i -e s#"moc_location=.*$"## \
- -e s#"uic_location=.*$"## \
- ${STAGE_TEMP}/${libdir}/pkgconfig/*.pc
- for name in ${QT_LIB_NAMES}; do
- sed -i -e "/Requires/s#${name}#${name}${QT_LIBINFIX}#"g ${D}${libdir}/pkgconfig/*.pc
- done
-
- # fix libtool files
- sed -i -e s#installed=yes#installed=no#g ${STAGE_TEMP}/${libdir}/*.la
-
- # install headers
- install -d ${STAGING_INCDIR}
- cp -pPRf ${STAGE_TEMP}/${includedir}/* ${STAGING_INCDIR}/
-
- # install libraries
- install -d ${STAGING_LIBDIR}
- for i in ${STAGE_TEMP}/${libdir}/*.prl; do
- cp -pPRf $i ${STAGING_LIBDIR}
- cp -pPRf ${STAGE_TEMP}/${libdir}/$(basename $i .prl).la ${STAGING_LIBDIR} || true
- oe_libinstall -C ${STAGE_TEMP}/${libdir} -a $(basename $i .prl) ${STAGING_LIBDIR} || true
- oe_libinstall -C ${STAGE_TEMP}/${libdir} -so $(basename $i .prl) ${STAGING_LIBDIR} || true
- done
-
- # install pkgconfig files
- install -d ${STAGING_LIBDIR}/pkgconfig
- cp -pPRf ${STAGE_TEMP}/${libdir}/pkgconfig/*.pc ${STAGING_LIBDIR}/pkgconfig/
-
- # install mkspecs
- install -d ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs
- cp -pPRf ${STAGE_TEMP}/${datadir}/${QT_DIR_NAME}/mkspecs/* ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/
-
- rm -rf ${STAGE_TEMP}
-
- # FIXME: install symlinks to tools?
- #install -d ${STAGING_DATADIR}/${QT_DIR_NAME}/bin
- #ln -sf ${STAGING_BINDIR_NATIVE}/qmake2 ${STAGING_DATADIR}/${QT_DIR_NAME}/bin/qmake
- #for qttool in moc uic uic3 rcc lrelease lupdate; do
- # ln -sf ${STAGING_BINDIR_NATIVE}/${qttool}4 ${STAGING_DATADIR}/${QT_DIR_NAME}/bin/${qttool}
- #done
-}
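Besides dropping the legacy do_stage() (which hand-copied headers, libraries, pkgconfig files and mkspecs into the staging area), the qt4.inc change splits the build tools into their own ${QT_BASE_NAME}-tools package and strips the -rpath-link pointing at the build tree from the generated .la/.prl/.pc files. On a target image the tools then become installable separately, roughly as below; the concrete package name depends on QT_BASE_NAME, "qt4-embedded" is assumed here:

    opkg install qt4-embedded-tools   # moc, uic*, rcc, qttracereplay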
diff --git a/recipes/qt4/wolfenqt-e_git.bb b/recipes/qt4/wolfenqt-e_git.bb
new file mode 100644
index 0000000000..661838842a
--- /dev/null
+++ b/recipes/qt4/wolfenqt-e_git.bb
@@ -0,0 +1,5 @@
+WOLFVARIANT = "e"
+require wolfenqt.inc
+
+
+
diff --git a/recipes/qt4/wolfenqt.inc b/recipes/qt4/wolfenqt.inc
new file mode 100644
index 0000000000..e1e08cf6ab
--- /dev/null
+++ b/recipes/qt4/wolfenqt.inc
@@ -0,0 +1,19 @@
+
+PE = "1"
+PV = "0.0"
+PR_append = "+gitr${SRCREV}"
+
+inherit qt4${WOLFVARIANT}
+
+SRCREV = "f43dfa2bfa1f72abd3500dfc94248b17c5f9ae05"
+SRC_URI = "git://gitorious.org/qt-labs/wolfenqt.git;protocol=git"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 qt3d ${D}${bindir}/wolfenqt-${WOLFVARIANT}
+}
+
+
+
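wolfenqt.inc is a small variant template: the recipe that requires it sets WOLFVARIANT first, which selects both the class that gets inherited (qt4${WOLFVARIANT}, i.e. qt4e or qt4x11 for the two recipes in this commit) and the suffix of the installed binary. A further variant would follow the same two-line pattern, assuming a matching qt4<variant> class exists; the variant name below is hypothetical:

    # hypothetical wolfenqt-foo_git.bb
    WOLFVARIANT = "foo"
    require wolfenqt.inc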
diff --git a/recipes/qt4/wolfenqt_git.bb b/recipes/qt4/wolfenqt_git.bb
new file mode 100644
index 0000000000..2d762f3dc2
--- /dev/null
+++ b/recipes/qt4/wolfenqt_git.bb
@@ -0,0 +1,5 @@
+WOLFVARIANT = "x11"
+require wolfenqt.inc
+
+
+
diff --git a/recipes/sg3-utils/sg3-utils_1.24.bb b/recipes/sg3-utils/sg3-utils_1.24.bb
index 4a2c378676..8a4a603e8c 100644
--- a/recipes/sg3-utils/sg3-utils_1.24.bb
+++ b/recipes/sg3-utils/sg3-utils_1.24.bb
@@ -4,7 +4,9 @@ HOMEPAGE = "http://packages.qa.debian.org/s/sg3-utils.html"
SECTION = "console/admin"
LICENSE = "GPLv2"
AUTHOR = "Eric Schwartz (Skif) <emschwar@debian.org>"
-PR = "r1"
+PR = "r2"
+
+TARGET_CC_ARCH += "${LDFLAGS}"
SRC_URI = "${DEBIAN_MIRROR}/main/s/sg3-utils/sg3-utils_${PV}.orig.tar.gz \
file://Makefile-r1.patch;patch=1"
diff --git a/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh b/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh
new file mode 100644
index 0000000000..d7554954d6
--- /dev/null
+++ b/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# usb gadget configuration:
+# several gadget configurations are already defined on the Palm Pre;
+# we choose number 5 because it provides the usbnet, novacom
+# and storage gadgets
+if [ -e /sys/class/usb_gadget/config_num ]; then
+ echo 5 > /sys/class/usb_gadget/config_num
+fi
+
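The init script selects composite gadget configuration number 5 by writing to the config_num attribute exposed by the Palm Pre kernel. The same sysfs file can be read back to see which configuration is actually active; a minimal check, assuming the same kernel interface:

    cat /sys/class/usb_gadget/config_num    # expected to print 5 after this script has run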
diff --git a/recipes/shr/initscripts-shr_0.0.1.bb b/recipes/shr/initscripts-shr_0.0.1.bb
index f1be1c2fc7..89df6ce375 100644
--- a/recipes/shr/initscripts-shr_0.0.1.bb
+++ b/recipes/shr/initscripts-shr_0.0.1.bb
@@ -5,7 +5,7 @@ DEPENDS = ""
RDEPENDS = ""
LICENSE = "GPL"
PV = "0.0.1"
-PR = "r13"
+PR = "r14"
RPROVIDES_${PN} = "initscripts"
RCONFLICTS_${PN} = "initscripts"
@@ -33,6 +33,7 @@ SRC_URI = "file://alignment.sh \
file://umountfs \
file://umountnfs.sh \
"
+SRC_URI_append_palmpre = " file://usb-gadget.sh"
inherit base
@@ -76,6 +77,11 @@ do_install () {
install -m 0755 ${WORKDIR}/umountfs ${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/umountnfs.sh ${D}${sysconfdir}/init.d
+ if [ "${MACHINE}" == "palmpre" ]; then
+ install -m 0755 ${WORKDIR}/usb-gadget.sh ${D}${sysconfdir}/init.d
+ ln -sf ../init.d/usb-gadget.sh ${D}${sysconfdir}/rcS.d/S00usb-gadget.sh
+ fi
+
#
# Create runlevel links
#
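The install hook above is guarded by a test on ${MACHINE}, mirroring the SRC_URI_append_palmpre line earlier in the recipe. An equivalent, arguably more idiomatic variant would keep do_install machine-independent and move the Pre-specific bits into a machine-override append; a sketch, not part of this commit:

    do_install_append_palmpre() {
        install -m 0755 ${WORKDIR}/usb-gadget.sh ${D}${sysconfdir}/init.d
        ln -sf ../init.d/usb-gadget.sh ${D}${sysconfdir}/rcS.d/S00usb-gadget.sh
    }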
diff --git a/recipes/shr/shr-launcher_svn.bb b/recipes/shr/shr-launcher_svn.bb
index 807d3a2cea..481be9021b 100644
--- a/recipes/shr/shr-launcher_svn.bb
+++ b/recipes/shr/shr-launcher_svn.bb
@@ -8,15 +8,9 @@ DEPENDS = "elementary eina edbus"
PV = "0.0.1+svnr${SRCPV}"
PR = "r4"
-SRC_URI = "svn://shr-launcher.googlecode.com/svn/trunk;module=.;proto=http"
+SRC_URI = "svn://shr-launcher.googlecode.com/svn;module=trunk;proto=http"
-do_configure_prepend() {
- # all links to /usr/share/automake-1.10/
- rm -f ${S}/depcomp ${S}/config.guess ${S}/config.sub ${S}/INSTALL ${S}/install-sh ${S}/missing
- touch ${S}/INSTALL
-}
-
-S = "${WORKDIR}"
+S = "${WORKDIR}/trunk"
inherit autotools
diff --git a/recipes/tasks/task-shr-feed.bb b/recipes/tasks/task-shr-feed.bb
index 1e5706d777..4b3e764d5e 100644
--- a/recipes/tasks/task-shr-feed.bb
+++ b/recipes/tasks/task-shr-feed.bb
@@ -27,6 +27,7 @@ RDEPENDS_${PN} += "\
gpe-timesheet \
gpe-contacts \
gtkmm \
+ guitartune \
mc \
mplayer \
navit \
diff --git a/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch b/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch
deleted file mode 100644
index 8c2b85db13..0000000000
--- a/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Index: git/libc/inet/rpc/rpc_thread.c
-===================================================================
---- git.orig/libc/inet/rpc/rpc_thread.c 2009-12-04 13:13:09.000000000 -0800
-+++ git/libc/inet/rpc/rpc_thread.c 2009-12-04 13:13:17.000000000 -0800
-@@ -14,6 +14,7 @@
- #ifdef __UCLIBC_HAS_THREADS__
-
- #include <bits/libc-tsd.h>
-+#include <bits/libc-lock.h>
-
- /* Variable used in non-threaded applications or for the first thread. */
- static struct rpc_thread_variables __libc_tsd_RPC_VARS_mem;
diff --git a/recipes/uclibc/uclibc.inc b/recipes/uclibc/uclibc.inc
index bb1e7da6bf..23810ea2a4 100644
--- a/recipes/uclibc/uclibc.inc
+++ b/recipes/uclibc/uclibc.inc
@@ -182,3 +182,6 @@ do_install() {
chmod +x ${D}/${base_libdir}/*
}
+get_monotonic_srcrev () {
+	(cd ${S}; git rev-list HEAD | wc -l)
+}
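The helper derives a monotonically increasing revision number from the git history by counting commits, which is what SRCPV-style versioning needs when raw git hashes do not sort. Run by hand it boils down to the following; a sketch, assuming a plain git checkout in ${S}:

    cd ${S} && git rev-list HEAD | wc -l    # prints the commit count used as the monotonic revision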
diff --git a/recipes/uclibc/uclibc_nptl.bb b/recipes/uclibc/uclibc_nptl.bb
index cb858a6996..a978025a1e 100644
--- a/recipes/uclibc/uclibc_nptl.bb
+++ b/recipes/uclibc/uclibc_nptl.bb
@@ -34,7 +34,6 @@ SRC_URI = "git://uclibc.org/uClibc.git;protocol=git;branch=nptl \
file://uclibc-arm-ftruncate64.patch;patch=1 \
file://ldso_use_arm_dl_linux_resolve_in_thumb_mode.patch;patch=1 \
file://uclibc_arm_remove_duplicate_sysdep_defs.patch;patch=1 \
- file://uclibc_rpc_thread.patch;patch=1 \
file://installfix.patch;patch=1 \
"
diff --git a/recipes/xorg-driver/xf86-input-tslib/dontfloodevents006.patch b/recipes/xorg-driver/xf86-input-tslib/dontfloodevents006.patch
new file mode 100644
index 0000000000..e989717d3b
--- /dev/null
+++ b/recipes/xorg-driver/xf86-input-tslib/dontfloodevents006.patch
@@ -0,0 +1,22 @@
+Index: xf86-input-tslib-0.0.6/src/tslib.c
+===================================================================
+--- xf86-input-tslib-0.0.6.orig/src/tslib.c 2009-11-29 20:03:29.734794324 +0000
++++ xf86-input-tslib-0.0.6/src/tslib.c 2009-11-29 20:29:24.066794215 +0000
+@@ -205,7 +205,7 @@
+ */
+ switch (priv->state) {
+ case BUTTON_EMULATION_OFF :
+- if(priv->lastp != samp.pressure) {
++ if(!!priv->lastp != !!samp.pressure) {
+ priv->lastp = samp.pressure;
+ xf86PostButtonEvent(local->dev, TRUE,
+ 1, !!samp.pressure, 0, 2,
+@@ -512,7 +512,7 @@
+ s = xf86CheckStrOption(dev->commonOptions, "path", NULL);
+ if (!s)
+ s = xf86CheckStrOption(dev->commonOptions, "Device", NULL);
+-
++
+ priv->ts = ts_open(s, 1);
+ xfree(s);
+
diff --git a/recipes/xorg-driver/xf86-input-tslib_0.0.6.bb b/recipes/xorg-driver/xf86-input-tslib_0.0.6.bb
index 785e5f4e5e..e86f0c75e6 100644
--- a/recipes/xorg-driver/xf86-input-tslib_0.0.6.bb
+++ b/recipes/xorg-driver/xf86-input-tslib_0.0.6.bb
@@ -4,7 +4,10 @@ DESCRIPTION = "X.Org X server -- tslib input driver"
RRECOMMENDS += "hal tslib-calibrate"
DEPENDS += "tslib"
+PR = "r2"
+
SRC_URI = "http://www.pengutronix.de/software/xf86-input-tslib/download/xf86-input-tslib-${PV}.tar.bz2 \
+ file://dontfloodevents006.patch;patch=1 \
file://10-x11-input-tslib.fdi"
do_configure_prepend() {
@@ -17,4 +20,3 @@ do_install_append() {
}
FILES_${PN} += "${datadir}/hal"
-