author    Frans Meulenbroeks <fransmeulenbroeks@gmail.com>   2009-12-14 21:42:56 +0100
committer Frans Meulenbroeks <fransmeulenbroeks@gmail.com>   2009-12-14 21:42:56 +0100
commit    89b4cc86d929b04c5902e81e91cb9a6a7c913524 (patch)
tree      f8a2eecf604d45c9a4fa676d9e1d3cfab1d5db67
parent    d555e8e394e71c88f2bec64f9b212c7fe07c5466 (diff)
parent    3f954a2253b2419bff27a905ca2996e0a6057ca8 (diff)
download  openembedded-89b4cc86d929b04c5902e81e91cb9a6a7c913524.tar.gz
Merge branch 'org.openembedded.dev' of git.openembedded.org:openembedded into org.openembedded.dev
-rw-r--r--  conf/checksums.ini | 70
-rw-r--r--  conf/compatibility-providers.conf | 2
-rw-r--r--  conf/distro/include/preferred-om-2008-versions.inc | 4
-rw-r--r--  conf/distro/include/preferred-shr-versions.inc | 3
-rw-r--r--  conf/distro/include/sane-srcrevs-fso.inc | 2
-rw-r--r--  conf/distro/include/sane-srcrevs.inc | 9
-rw-r--r--  conf/machine/da850-omapl138-evm.conf | 2
-rw-r--r--  conf/machine/sgh-i900.conf | 1
-rw-r--r--  recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb (renamed from recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb) | 2
-rw-r--r--  recipes/e17/e-tasks_svn.bb | 10
-rw-r--r--  recipes/fbreader/fbreader-0.12.1/Makefile.patch | 22
-rw-r--r--  recipes/fbreader/fbreader_0.12.1.bb | 35
-rw-r--r--  recipes/gabriel/gabriel_svn.bb | 2
-rw-r--r--  recipes/intone-video/intone-video_svn.bb | 19
-rw-r--r--  recipes/intone/intone_svn.bb | 10
-rw-r--r--  recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch | 53
-rw-r--r--  recipes/iproute2/iproute2.inc | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.18.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.20.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.22.bb | 2
-rw-r--r--  recipes/iproute2/iproute2_2.6.29.bb | 3
-rw-r--r--  recipes/kexec/kexec-tools-klibc-static_2.0.1.bb | 7
-rw-r--r--  recipes/klibc/klibc-1.5.15/isystem.patch | 13
-rw-r--r--  recipes/klibc/klibc_1.5.15.bb | 2
-rw-r--r--  recipes/klibc/klibc_1.5.15.inc | 3
-rw-r--r--  recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch | 29
-rw-r--r--  recipes/linux/linux-2.6.22/mx31moboard/defconfig (renamed from recipes/linux/linux-2.6.22.6/mx31moboard/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch | 19
-rw-r--r--  recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch | 8567
-rw-r--r--  recipes/linux/linux-2.6.23/time.h.patch (renamed from recipes/linux/linux-2.6.24/time.h.patch) | 0
-rw-r--r--  recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig (renamed from recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig (renamed from recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig) | 0
-rw-r--r--  recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch | 49
-rw-r--r--  recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch | 54
-rw-r--r--  recipes/linux/linux-sgh-i900/sgh-i900-support.patch | 13031
-rw-r--r--  recipes/linux/linux-sgh-i900/sgh_i900_defconfig | 414
-rw-r--r--  recipes/linux/linux-sgh-i900/wm97xx-ts-fix.patch | 130
-rw-r--r--  recipes/linux/linux-sgh-i900_2.6.32.bb (renamed from recipes/linux/linux-sgh-i900_2.6.29.bb) | 10
-rw-r--r--  recipes/linux/linux_2.6.18.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.20.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.21+2.6.22-rc1.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.21.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.22+2.6.23-rc3.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.22+2.6.23-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.22.6.bb | 31
-rw-r--r--  recipes/linux/linux_2.6.22.bb | 6
-rw-r--r--  recipes/linux/linux_2.6.23+2.6.24-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.23+2.6.24-rc6.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.23.bb | 13
-rw-r--r--  recipes/linux/linux_2.6.24.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.25.20.bb | 33
-rw-r--r--  recipes/linux/linux_2.6.25.bb | 17
-rw-r--r--  recipes/linux/linux_2.6.26.bb | 6
-rw-r--r--  recipes/linux/linux_2.6.27.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.28.bb | 3
-rw-r--r--  recipes/linux/linux_2.6.29+2.6.30-rc5.bb | 2
-rw-r--r--  recipes/linux/linux_2.6.29.bb | 5
-rw-r--r--  recipes/linux/linux_2.6.30.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.31.bb | 4
-rw-r--r--  recipes/linux/linux_2.6.32.bb | 3
-rw-r--r--  recipes/openmoko-3rdparty/guitartune_svn.bb | 28
-rw-r--r--  recipes/powervr-drivers/libgles-omap3.inc | 1
-rw-r--r--  recipes/qt4/qt-4.6.0.inc | 2
-rw-r--r--  recipes/qt4/qt4-embedded-gles_4.6.0.bb | 2
-rw-r--r--  recipes/qt4/qt4-embedded_4.6.0.bb | 1
-rw-r--r--  recipes/qt4/qt4-x11-free-gles_4.6.0.bb | 2
-rw-r--r--  recipes/qt4/qt4-x11-free_4.6.0.bb | 1
-rw-r--r--  recipes/qt4/qt4.inc | 56
-rw-r--r--  recipes/qt4/wolfenqt-e_git.bb | 5
-rw-r--r--  recipes/qt4/wolfenqt.inc | 19
-rw-r--r--  recipes/qt4/wolfenqt_git.bb | 5
-rw-r--r--  recipes/shr/initscripts-shr/palmpre/usb-gadget.sh | 10
-rw-r--r--  recipes/shr/initscripts-shr_0.0.1.bb | 8
-rw-r--r--  recipes/shr/phoneuid_git.bb | 4
-rw-r--r--  recipes/shr/shr-launcher_svn.bb | 10
-rw-r--r--  recipes/tasks/task-shr-feed.bb | 1
-rw-r--r--  recipes/tasks/task-shr-minimal.bb | 3
-rw-r--r--  recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch | 12
-rw-r--r--  recipes/uclibc/uclibc.inc | 3
-rw-r--r--  recipes/uclibc/uclibc_nptl.bb | 3
80 files changed, 22251 insertions, 662 deletions
diff --git a/conf/checksums.ini b/conf/checksums.ini
index 3bf98541fe..8bbcd06b22 100644
--- a/conf/checksums.ini
+++ b/conf/checksums.ini
@@ -58,6 +58,14 @@ sha256=8930ebfdc8a606d8cb26f073d4700460c3289fb79e943e12948329e17336ca47
md5=779472ae02c2a99937879a8d1d4b9b25
sha256=cfb98e7635c985733dba0fb9c3cadee22ab70fb3b0db7eac8eacaebc65c92a59
+[http://maxim.org.za/AT91RM9200/2.6/2.6.22-rc1-at91.patch.gz]
+md5=2453815aba40e9487d24822d769fbab1
+sha256=95cec79fb6db261760421c1bb9df8a0f4955e0ee90e08cb9930a4c7a2482c1a6
+
+[http://maxim.org.za/AT91RM9200/2.6/2.6.23-rc3-at91.patch.gz]
+md5=822f2f85b658fb1f39b8a20fab781cfc
+sha256=e8ead43fa562cc76ac34d0d4841fd1e4f4964a830403801433e34961d1ce0e84
+
[http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz]
md5=4469d6336f9659f1725fedd4a52261ad
sha256=7a960180e7873b1bdb522e76b0423b5c2c1b8efe1d30d7ca80c41eb97d822b2d
@@ -1542,9 +1550,9 @@ sha256=68b1d0acd1a6e17d91412635cd4f65ba58d293e62a01475a43f3712c49a46e7d
md5=03e5e7ab8ac3acc59661c6e9c09089b7
sha256=fcda8bca508490bea642c83fcf718565bf4ed4c50f2d7b34761da61fe2e6bc9d
-[http://downloads.vdm-design.de/aceofpenguins-launcher-0.3.tar.gz]
-md5=2fe7dcdbbdbf3b2821f627e02c406caa
-sha256=13013cd19cb165825f4ef5cc790b0e103705f68ece470fd12012db97e3e60839
+[http://downloads.vdm-design.de/aceofpenguins-launcher-0.4.tar.gz]
+md5=40f19a26cbfa35de8346efe582b5d984
+sha256=ea09581b3a31e3409968876461da3e384dcb0000abbb0060ae14e4fccb4994e1
[http://downloads.sourceforge.net/acpid/acpid-1.0.10.tar.gz]
md5=61156ef32015c56dc0f2e3317f4ae09e
@@ -6734,6 +6742,10 @@ sha256=e4db7a6305ffe2333fae08c940ded8f7e5b02999e0917b0ea4ef3764c80f58c8
md5=145f4d2ba24c54288bad2d66ddd2baf2
sha256=d7bcc7cafb1c78f00b380dc2facdb82c5a2fb1475e1ddfc8e5d44a2b855cec1a
+[http://www.fbreader.org/fbreader-sources-0.12.1.tgz]
+md5=7236d094a91f26d19a3cfd3db8fcf946
+sha256=02a6a143abf3b5ab875392acef8d67f5cc991ea364e3bd250628e6a31b1f9923
+
[http://only.mawhrin.net/fbreader/obsolete/fbreader-sources-0.7.3d.tgz]
md5=705a89bb03860fb312a7afa25db0310e
sha256=ec9f93968147526a9b2dd17e31a6d2795356b2eeed309f796f655b37b98abd83
@@ -10962,6 +10974,10 @@ sha256=01ba0c34c3bf3bc4ea7728550e3bcfca779a48907ea4d6d9e5a83e8678df5096
md5=dbb085088337e2dd8c0216c36523ea06
sha256=68a114ece326b258b26259d31b9bb59c10049ff0162bcaa0f4f7a7dea9d244dc
+[http://gupnp.org/sources/gssdp/gssdp-0.7.1.tar.gz]
+md5=725c32e8f92a072cc34f0e091937df2a
+sha256=8eaab799f699836770ec2fcc08abfef2f824a82ae959c6af7b39ffb6968b9fd7
+
[http://gstreamer.freedesktop.org/src/gst-ffmpeg/gst-ffmpeg-0.10.1.tar.bz2]
md5=e21aef9a84d67dea9a68c1379781f763
sha256=a2c877c38d057875c7dfbf1803030c5cc9707020b77f91673500d6ea8d858607
@@ -11578,6 +11594,10 @@ sha256=f78e4b0a361b67805892c1a0e72f3cef92fbc96112157895660a478979ddeef4
md5=bfb12195c76bb6632bd917f2c2bc12d6
sha256=241e416cbe2c02f413fde82a8587bfe2fe9915fbed3a6fb20c86520b8d7543ef
+[http://gupnp.org/sources/gupnp/gupnp-0.13.1.tar.gz]
+md5=9b5fcf8146ba9a2bd84382f61717aa0e
+sha256=e97faaebf0da42617a43de4c7c1148a51148f2a2cdaa2a10855e377b968a07fd
+
[http://gupnp.org/sources/gupnp-av/gupnp-av-0.2.tar.gz]
md5=ab485bf263d0a3d2f771817241c970b9
sha256=782e4e45abcba1b3fe34276580653f4dbfbe8a26eee69a290675dfa7faa309f8
@@ -11586,6 +11606,10 @@ sha256=782e4e45abcba1b3fe34276580653f4dbfbe8a26eee69a290675dfa7faa309f8
md5=f6e813591ff89e8e61a46f416046450f
sha256=34b6e104b480e501e430daa68fca63906a939a6cb02bc43814ed06d2856a72ac
+[http://gupnp.org/sources/gupnp-av/gupnp-av-0.5.2.tar.gz]
+md5=15ccfbb17553bf1cb00bf8e1d801005e
+sha256=dfd438f40e31047d6f06db30db05d5f876c6294a8509f170482d712f552e9892
+
[http://gupnp.org/sources/gupnp-tools/gupnp-tools-0.6.1.tar.gz]
md5=27d3a55eae2243661f49a7cf40e1e195
sha256=c4004de997674bb54d5844391dd824c6aee78184b075e7ea5d8f816e0d031e25
@@ -17378,6 +17402,10 @@ sha256=d7b9f19b92fd5c693c16cd62f441d051b699f28ec6a175d1b464e58bacd8c78f
md5=84c077a37684e4cbfa67b18154390d8a
sha256=0acd83f7b85db7ee18c2b0b7505e1ba6fd722c36f49a8870a831c851660e3512
+[http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.32.tar.bz2]
+md5=260551284ac224c3a43c4adac7df4879
+sha256=5099786d80b8407d98a619df00209c2353517f22d804fdd9533b362adcb4504e
+
[http://kernel.org//pub/linux/kernel/v2.6/linux-2.6.9.tar.bz2]
md5=e921200f074ca97184e150ef5a4af825
sha256=f5dba6366e87e91234d1b0069cfea655b0a4cb37ea97f899226f16998e6ab9f1
@@ -21026,6 +21054,10 @@ sha256=34beecc0dd156267e8004fb79efea9bf97e1157ed597bdde1841c16def2e9195
md5=955088e5139ef5914d44fe15b4a50b90
sha256=924c27b95f0c3792bf3d48ff854ef145e8916452b917067d653f59102f03c0d4
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.18.8.bz2]
+md5=090f582d2a0e1951d500b2e55f7df7b4
+sha256=cde777361d2a4818ea9c215e195a87da4847dafa94a10ac8c9f4bd8dc49fde3f
+
[http://www.muru.com/linux/omap/patches/patch-2.6.19-omap1.bz2]
md5=3590e42e1a6ea4676df5b187b830b402
sha256=b7cba2e38e81abb4d1d33c3f7f7c028310b392d5ee1ed5e3b2137e024598d903
@@ -21054,6 +21086,10 @@ sha256=808ca62a66d7cfe40123301c2f51fc0dcd817ee3bb0df96d1e9e97cc3bad6a9c
md5=b9c8734471a454806c77f040fcf9869b
sha256=5ee24e1c5636bcffed155b1c01d7d09fedb135fa2458c190a0da03a82c8c2f60
+[http://kernel.org/pub/linux/kernel/v2.6/testing/v2.6.22/patch-2.6.22-rc1.bz2]
+md5=9bc06492dce31c87f1cdfa2ce5b0cf4c
+sha256=dd33f3e9059bed043194ee5200239f26d3ad607ab5c872e7ce92595c1eb5d0e4
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.22.19.bz2]
md5=066cc3bdd2783dcd01f6ff466e449ec0
sha256=829c48b49c71d89468f2a5a05587714811197545eeba31e9643cabacf344d33a
@@ -21082,6 +21118,10 @@ sha256=c2085fc8fc6df586ef8c19a4562b84162f0b77956d691aa4fbee5e90c9800cb7
md5=736ea68a03158c24e55aa95e0ab15ceb
sha256=4d2c13dee5ea7bd8b5cdbf63afa9383b45f6bad1f75b163c49e086a5030a04de
+[http://kernel.org/pub/linux/kernel/v2.6/testing/v2.6.23/patch-2.6.23-rc5.bz2]
+md5=8253467313749aee6065093cd3c5fd9c
+sha256=c8c2068183aca79c46182f3d3fe6d7579cd60809681d42c52d71cf1873cd1a0e
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.23.1.bz2]
md5=896c5d4e7fd68d37b8c16e5d2842563b
sha256=55e811b4d4b0cbfde500bdd5455b7180d3def3dbdc52314520b7327d32b23f42
@@ -21178,6 +21218,10 @@ sha256=7fc735b85225850dae3c3acfe4bf0fe59f4c884d7c37f89395867dc73bf8f8af
md5=e75d87c11065955871b2e005d0242c99
sha256=0e5e633e81f1dcc0dcd2372494fbd2d6a72b28bb177a3285c4766a9659b454bd
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.27.41.bz2]
+md5=a431f1f68badcfa5a6cecc6b4d52d319
+sha256=ed3ec802ccb3585bf0a57724471fa24b98cce5caef8f35d6c26fed7c9213168d
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.27.8.bz2]
md5=ec23e3dce22b23ca681199fe515f10fb
sha256=31c35db09289c6e0436a258745d7180e0cd8f567949f27b3dab5a57a3664ed2f
@@ -21222,6 +21266,10 @@ sha256=25a9aff47cc568e4bcaa4377cacbcae11ea454aeeea9519aa3a1b6dbffea713c
md5=2f399a5e286a9fe7cb40bfd3d42a7a3d
sha256=79a9913a74e58af6431bb952aac2cf0a1f4422287f420844f24ca6bc5ed0fdc4
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.30.10.bz2]
+md5=6485fe0cf0f0220493647505bfd2f7b0
+sha256=a1ffb806d7d0083aa8d0525cbccede4172f4a44c8df1ddfeece629b6d8304201
+
[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.30.4.bz2]
md5=d0fc44b54ba5953140b3f2aa9a1f2580
sha256=b7716971e73c8fa96ecd9cdb598c8bd3a2a946e289cfef5dcfaa11a0022737ce
@@ -21250,6 +21298,18 @@ sha256=bc670682ed9b81d5d3859130c600601bd72053fd738b51b2daf8ddf3f4614a66
md5=6cac5e59d5562b591cdda485941204d5
sha256=41e7d98a205d58a62901daf4e46ecf5fb0b177e5a233a3c0ad3250a3a0abe8aa
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.31.7.bz2]
+md5=801b9c379a12339c0210dc5d1a8e9537
+sha256=f9edbb3c15c4094d840d5954131bb91c4513d333b30bced7b5c906d36c843d5e
+
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.31.8.bz2]
+md5=0fda994c76a981a67464f43c766f2180
+sha256=c120bf7018749fca765773975b3d4444fa244781ceb844e4b9bab9b478cad213
+
+[http://kernel.org/pub/linux/kernel/v2.6/patch-2.6.32.1.bz2]
+md5=abc24a9beb8431a75301dd3884b37a3c
+sha256=26f6fad3259ad6d2356cfa04462ace5fd3b5cecec410bbcefe7d5f6dbeb00baa
+
[http://www.muru.com/linux/omap/patches/patch-2.6.9-omap1.bz2]
md5=d6249654087f0bcafaa860ac573316a4
sha256=91806347cb386002a8bfd20ee66e536e4a7dfb01f207dd751341f2971090d9ac
@@ -23862,6 +23922,10 @@ sha256=04ecd2577ee0c210df610b4a5d870f2bda57c80962fb5b51ae9c4a94098c726c
md5=7a0c73ccf0e31808ad37b61e730cd10e
sha256=33d2363d40b1db7568864dc063ce4af70e50a9f9292a16445f0b167618d8fbba
+[http://ftp.acc.umu.se/pub/GNOME/sources/rygel/0.4/rygel-0.4.6.tar.bz2]
+md5=f28c48e2caf79b8e9056636259056de7
+sha256=67c58590430d04799644d164e4a5977356d95528864ea787356e35dd314e660f
+
[http://www.informatik.hu-berlin.de/~tkunze/zaurus/patches/sa1100-dma.patch]
md5=4336cca098b577608813a7c1454f2edd
sha256=c532c14ffb9358d1e0dbb67fce113c6aec0dd9c77ad41875222d69979baac9e7
diff --git a/conf/compatibility-providers.conf b/conf/compatibility-providers.conf
index d2ee2b6a23..10a81f2943 100644
--- a/conf/compatibility-providers.conf
+++ b/conf/compatibility-providers.conf
@@ -61,7 +61,7 @@ PREFERRED_PROVIDER_virtual/gail ?= "gtk+"
PREFERRED_PROVIDER_virtual/javac-native ?= "ecj-bootstrap-native"
PREFERRED_PROVIDER_virtual/java-initial ?= "jamvm-initial"
PREFERRED_PROVIDER_virtual/java-native ?= "jamvm-native"
-PREFERRED_PROVIDER_virtual/libgl ?= "mesa-dri"
+PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
PREFERRED_PROVIDER_virtual/libsdl ?= "libsdl-x11"
#PREFERRED_PROVIDER_virtual/libusb0 ?= "libusb"
PREFERRED_PROVIDER_virtual/libusb0 ?= "libusb-compat"
diff --git a/conf/distro/include/preferred-om-2008-versions.inc b/conf/distro/include/preferred-om-2008-versions.inc
index 7cc923cc60..d881202b09 100644
--- a/conf/distro/include/preferred-om-2008-versions.inc
+++ b/conf/distro/include/preferred-om-2008-versions.inc
@@ -175,7 +175,7 @@ PREFERRED_VERSION_comprec ?= "0.02"
PREFERRED_VERSION_confuse ?= "2.5"
PREFERRED_VERSION_confuse-native ?= "2.5"
PREFERRED_VERSION_connect ?= "0.1"
-PREFERRED_VERSION_connman ?= "0.10"
+PREFERRED_VERSION_connman ?= "0.15"
PREFERRED_VERSION_conserver ?= "8.1.14"
PREFERRED_VERSION_console-tools ?= "0.3.2"
PREFERRED_VERSION_contacts ?= "0.7"
@@ -1963,7 +1963,7 @@ PREFERRED_VERSION_wlan-ng-modules ?= "0.2.7"
PREFERRED_VERSION_wlan-ng-utils ?= "0.2.7"
PREFERRED_VERSION_wmctrl ?= "1.07"
PREFERRED_VERSION_wpa-gui ?= "0.4.8"
-PREFERRED_VERSION_wpa-supplicant ?= "0.6.3"
+PREFERRED_VERSION_wpa-supplicant ?= "0.6.9"
PREFERRED_VERSION_wpa-supplicant-nossl ?= "0.2.6"
PREFERRED_VERSION_wpa-supplicant-ssl ?= "0.2.6"
PREFERRED_VERSION_wrt-imagetools-native ?= "1.0"
diff --git a/conf/distro/include/preferred-shr-versions.inc b/conf/distro/include/preferred-shr-versions.inc
index f4ddf2fa25..fd83e02afe 100644
--- a/conf/distro/include/preferred-shr-versions.inc
+++ b/conf/distro/include/preferred-shr-versions.inc
@@ -17,8 +17,7 @@ PREFERRED_VERSION_classpath-native = "0.98"
PREFERRED_VERSION_linux-libc-headers = "2.6.31"
# override EFL_SRCREV from sane-srcrevs.inc
-# now is EFL_SRCREV newer in sane-srcrevs.inc
-# EFL_SRCREV ?= "43898"
+EFL_SRCREV ?= "44424"
# specifically set an openssh version
# NOTE: whenever changing the version here make sure
diff --git a/conf/distro/include/sane-srcrevs-fso.inc b/conf/distro/include/sane-srcrevs-fso.inc
index 80f8bfeedc..d975ec261a 100644
--- a/conf/distro/include/sane-srcrevs-fso.inc
+++ b/conf/distro/include/sane-srcrevs-fso.inc
@@ -30,7 +30,7 @@ SRCREV_pn-fso-specs ?= "14de522adbea80416df811085b3112cd1d5d5336"
SRCREV_pn-gsmd2 ?= "c16883a079aeff8780e5d461ec4e8348537ab4d8"
SRCREV_pn-libeflvala ?= "d07db4fbd24c9d5dfc9b1fd5024fd651b02f123e"
SRCREV_pn-libfso-glib ?= "9a627aa1c33f5a46ae4316fc274126ca5f37e979"
-SRCREV_pn-libframeworkd-glib ?= "52c988638e517e0f06ea7841b80692c729a9f487"
+SRCREV_pn-libframeworkd-glib ?= "e5cc248f241ea549810051fb95f0250bd1224cf9"
SRCREV_pn-libgsm0710 ?= "cd564c8782f018e0d65fb8716c99a6040b5bd166"
SRCREV_pn-libgsm0710mux ?= "e81ed512ec86e31d0d0119826afa9d1302651693"
SRCREV_pn-libpersistence ?= "26180fd3c0fe4eb6abb7440f10e51d997719b97a"
diff --git a/conf/distro/include/sane-srcrevs.inc b/conf/distro/include/sane-srcrevs.inc
index 3c3b671574..895f5519d2 100644
--- a/conf/distro/include/sane-srcrevs.inc
+++ b/conf/distro/include/sane-srcrevs.inc
@@ -46,7 +46,7 @@ SRCREV_pn-dfu-util-native ?= "4160"
SRCREV_pn-disko ?= "f52597b8d5d584811cbe8f9e0bf25ea372526953"
SRCREV_pn-diversity-daemon ?= "571"
SRCREV_pn-diversity-radar ?= "453"
-SRCREV_pn-e-tasks ?= "18"
+SRCREV_pn-e-tasks ?= "22"
SRCREV_pn-e-wm-config-illume-shr ?= "bbcec18f0ebd47e4f6eea88b9b774edf7400e752"
SRCREV_pn-e-wm-illume-dict-pl ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-e-wm-menu-shr ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
@@ -104,14 +104,15 @@ SRCREV_pn-gpe-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-gridpad ?= "194"
SRCREV_pn-gtk-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-gtkhtml2 ?= "1158"
+SRCREV_pn-guitartune ?= "11"
SRCREV_pn-gypsy ?= "134"
SRCREV_pn-hildon-1 ?= "14429"
SRCREV_pn-icon-theme-neo ?= "1cc80e26a4558dfc2268b349d9a1f468e515bcfb"
SRCREV_pn-illume-keyboards-shr ?= "c23bde500152c303971b4c2ec5c463d760b7cc14"
SRCREV_pn-illume-theme-asu ?= "4881"
SRCREV_pn-illume-theme-freesmartphone ?= "b1b0f6adc59e6f72a3929771058e3750bf181bc5"
-SRCREV_pn-intone ?= "66"
-SRCREV_pn-intone-video ?= "9"
+SRCREV_pn-intone ?= "75"
+SRCREV_pn-intone-video ?= "12"
SRCREV_pn-intuition ?= "194"
SRCREV_pn-kismet ?= "2285"
SRCREV_pn-kismet-newcore ?= "2285"
@@ -292,7 +293,7 @@ SRCREV_pn-shr-config ?= "37dd7ac950e2bfd438801faf34c29fccfdbbaccf"
SRCREV_pn-shr-contacts ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-dialer ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-installer ?= "f17fa104639113fb0d3212b6bba366c092854cde"
-SRCREV_pn-shr-launcher ?= "92"
+SRCREV_pn-shr-launcher ?= "99"
SRCREV_pn-shr-messages ?= "9d7ca1cecb93022e5b890cd87756ac6f072710ca"
SRCREV_pn-shr-settings ?= "ef06fe86c49958673889671c46682c0b2f1d74d7"
SRCREV_pn-shr-specs ?= "a881cd133439737708d4d4d150500246ceff7c7d"
diff --git a/conf/machine/da850-omapl138-evm.conf b/conf/machine/da850-omapl138-evm.conf
index b61c00ca27..aaf7583e5d 100644
--- a/conf/machine/da850-omapl138-evm.conf
+++ b/conf/machine/da850-omapl138-evm.conf
@@ -3,7 +3,7 @@
#@DESCRIPTION: Machine configuration for the TI DA850/OMAPL138 EVM board
require conf/machine/include/davinci.inc
-require conf/machine/include/omapl138.inc
+require conf/machine/include/omapl138.conf
UBOOT_MACHINE = "da850_omapl138_evm_config"
UBOOT_ENTRYPOINT = "0xc0008000"
diff --git a/conf/machine/sgh-i900.conf b/conf/machine/sgh-i900.conf
index 2080bf8e1a..4d3cd5fcee 100644
--- a/conf/machine/sgh-i900.conf
+++ b/conf/machine/sgh-i900.conf
@@ -11,6 +11,7 @@ TARGET_ARCH = "arm"
require conf/machine/include/tune-xscale.inc
MACHINE_FEATURES = "alsa apm bluetooth camera gps kernel26 phone screen touchscreen vfat wifi"
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules"
# Software/packages selection
#
diff --git a/recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb b/recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb
index 2e96d81475..265b8c6651 100644
--- a/recipes/aceofpenguins/aceofpenguins-launcher_0.3.bb
+++ b/recipes/aceofpenguins/aceofpenguins-launcher_0.4.bb
@@ -8,7 +8,7 @@ SECTION = "x11/application"
PACKAGE_ARCH = "all"
-PR = "r1"
+PR = "r0"
inherit setuptools
diff --git a/recipes/e17/e-tasks_svn.bb b/recipes/e17/e-tasks_svn.bb
index 9556e146d6..5c07a1bd3c 100644
--- a/recipes/e17/e-tasks_svn.bb
+++ b/recipes/e17/e-tasks_svn.bb
@@ -10,14 +10,8 @@ inherit autotools
PV = "0.0.1+svnr${SRCPV}"
PR = "r1"
-SRC_URI = "svn://e-tasks.googlecode.com/svn/trunk;module=.;proto=http"
-S = "${WORKDIR}"
-
-do_configure_prepend() {
- # all links to /usr/share/automake-1.10/
- rm -f ${S}/depcomp ${S}/config.guess ${S}/config.sub ${S}/INSTALL ${S}/install-sh ${S}/missing
- touch ${S}/INSTALL
-}
+SRC_URI = "svn://e-tasks.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
do_install_append() {
install -d "${D}/${datadir}/pixmaps"
diff --git a/recipes/fbreader/fbreader-0.12.1/Makefile.patch b/recipes/fbreader/fbreader-0.12.1/Makefile.patch
new file mode 100644
index 0000000000..0e498c7125
--- /dev/null
+++ b/recipes/fbreader/fbreader-0.12.1/Makefile.patch
@@ -0,0 +1,22 @@
+diff -uri fbreader-0.12.1.orig/fbreader/Makefile fbreader-0.12.1/fbreader/Makefile
+--- fbreader-0.12.1.orig/fbreader/Makefile 2009-12-13 09:03:11.000000000 +0100
++++ fbreader-0.12.1/fbreader/Makefile 2009-12-14 17:18:47.548638783 +0100
+@@ -37,7 +37,6 @@
+ @install $(TARGET) $(DESTDIR)$(BINDIR)/FBReader
+ @install -d $(FBSHAREDIR)
+ @install -d $(FBSHAREDIR)/help
+- @./scripts/install_help.sh $(VARIANT) $(FBSHAREDIR)/help
+ @install -d $(FBSHAREDIR)/network
+ @install -m 0644 $(wildcard data/network/*.xml) $(FBSHAREDIR)/network
+ @install -d $(FBSHAREDIR)/network/certificates
+@@ -58,8 +57,8 @@
+ @install -d $(FBSHAREDIR)/resources
+ @install -m 0644 $(wildcard data/resources/*.xml) $(FBSHAREDIR)/resources
+ @install -d $(DESTDIR)$(APPIMAGEDIR_REAL)
+- @install -m 0644 $(wildcard data/icons/toolbar/$(VARIANT)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+- @install -m 0644 $(wildcard data/icons/filetree/$(VARIANT)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
++ @install -m 0644 $(wildcard data/icons/toolbar/$(TARGET_ARCH)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
++ @install -m 0644 $(wildcard data/icons/filetree/$(TARGET_ARCH)/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+ @install -m 0644 $(wildcard data/icons/booktree/new/*.*) $(DESTDIR)$(APPIMAGEDIR_REAL)
+ @make -C $(TARGET_ARCH) RESOLUTION=$(RESOLUTION) install
+
diff --git a/recipes/fbreader/fbreader_0.12.1.bb b/recipes/fbreader/fbreader_0.12.1.bb
new file mode 100644
index 0000000000..c1c3025a1a
--- /dev/null
+++ b/recipes/fbreader/fbreader_0.12.1.bb
@@ -0,0 +1,35 @@
+DESCRIPTION = "FBreader is an ebook reader"
+HOMEPAGE = "http://www.fbreader.org"
+SECTION = "x11/utils"
+PRIORITY = "optional"
+LICENSE = "GPLv2"
+DEPENDS = "gtk+ enca expat bzip2 libgpewidget virtual/libiconv liblinebreak libfribidi"
+
+SRC_URI = "http://www.fbreader.org/fbreader-sources-${PV}.tgz \
+file://Makefile.patch;patch=1"
+
+# Set the defaults
+READER_RESOLUTION ?= "1024x600"
+READER_ARCH ?= "desktop"
+READER_UI ?= "gtk"
+READER_STATUS ?= "release"
+
+FILES_${PN} += "${datadir}/FBReader ${datadir}/zlibrary ${libdir}/zlibrary"
+
+CFLAGS_append = " RESOLUTION=${READER_RESOLUTION} INSTALLDIR=${prefix}"
+EXTRA_OEMAKE = "CC='${CXX}' LD='${CXX}' OE_CFLAGS='${CXXFLAGS}' INCPATH='${STAGING_INCDIR}' LIBPATH='${STAGING_LIBDIR}'"
+
+inherit pkgconfig
+
+do_configure() {
+ cd ${WORKDIR}/${PN}-${PV}
+ mv makefiles/target.mk makefiles/target.mk.orig
+
+ echo "TARGET_ARCH = ${READER_ARCH}" > makefiles/target.mk
+ echo "UI_TYPE = ${READER_UI}" >> makefiles/target.mk
+ echo "TARGET_STATUS = ${READER_STATUS}" >> makefiles/target.mk
+}
+
+do_install() {
+ oe_runmake install DESTDIR=${D} RESOLUTION=${READER_RESOLUTION}
+}
diff --git a/recipes/gabriel/gabriel_svn.bb b/recipes/gabriel/gabriel_svn.bb
index 94fdfe99e7..f525885fb0 100644
--- a/recipes/gabriel/gabriel_svn.bb
+++ b/recipes/gabriel/gabriel_svn.bb
@@ -2,7 +2,7 @@ DESCRIPTION = "Gabriel is a small utility to enable D-Bus clients to connect to
daemon running on a remote machine, through SSH. In simple words, gabriel is a proxy for \
a dbus daemon running on a remote machine."
LICENSE = "GPL"
-DEPENDS = "libssh glib-2.0 dbus glib-dbus"
+DEPENDS = "libssh glib-2.0 dbus dbus-glib"
SECTION = "console/network"
PV = "0.0.0+svnr${SRCPV}"
diff --git a/recipes/intone-video/intone-video_svn.bb b/recipes/intone-video/intone-video_svn.bb
index 30ac70acd2..0f7979f122 100644
--- a/recipes/intone-video/intone-video_svn.bb
+++ b/recipes/intone-video/intone-video_svn.bb
@@ -9,26 +9,15 @@ RDEPENDS = "mplayer lame libxv libsdl-x11"
PV = "0.13+svnr${SRCPV}"
PR = "r1"
-SRC_URI = "svn://intone-video.googlecode.com/svn/trunk;module=.;proto=http"
-S = "${WORKDIR}"
+SRC_URI = "svn://intone-video.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
inherit autotools
-do_configure_prepend() {
- rm -f "${S}/INSTALL"
- touch "${S}/INSTALL"
- sed -i 's/intone/intone-video/g' ${S}/configure.ac
- sed -i 's/\/doc\/intone$/\/share\/doc\/intone-video/g' ${S}/Makefile.am
- sed -i '/^EXTRA_DIST = $(glade_DATA)/d' ${S}/src/Makefile.am
- sed -i '/^gladedir = $(datadir)\/intone\/glade/d' ${S}/src/Makefile.am
- sed -i '/^glade_DATA = intone.glade/d' ${S}/src/Makefile.am
-}
-
do_install_append() {
- mv ${D}/${bindir}/intone ${D}/${bindir}/intone-video
- mkdir -p "${D}/${datadir}/pixmaps"
+ install -d "${D}/${datadir}/pixmaps"
install -m 0644 "${S}/resources/intone-video.png" "${D}/${datadir}/pixmaps"
- mkdir -p "${D}/${datadir}/applications"
+ install -d "${D}/${datadir}/applications"
install -m 0644 "${S}/resources/intone-video.desktop" "${D}/${datadir}/applications"
}
diff --git a/recipes/intone/intone_svn.bb b/recipes/intone/intone_svn.bb
index e1af70d9aa..29773c2638 100644
--- a/recipes/intone/intone_svn.bb
+++ b/recipes/intone/intone_svn.bb
@@ -9,18 +9,12 @@ RDEPENDS = "mplayer lame libxv libsdl-x11"
PV = "0.66+svnr${SRCPV}"
PR = "r2"
-SRC_URI = "svn://intone.googlecode.com/svn/trunk;module=.;proto=http \
+SRC_URI = "svn://intone.googlecode.com/svn;module=trunk;proto=http \
file://vorbis-include-id3tag.patch;pnum=1;patch=1;maxrev=18"
-S = "${WORKDIR}"
+S = "${WORKDIR}/trunk"
inherit autotools
-do_configure_prepend() {
- rm -f "${S}/INSTALL"
- touch "${S}/INSTALL"
- sed -i 's/{prefix}\/doc\/intone$/{prefix}\/share\/doc\/intone/g' ${S}/Makefile.am
-}
-
do_install_append() {
mkdir -p "${D}/${datadir}/pixmaps"
install -m 0644 "${S}/resources/intone.png" "${D}/${datadir}/pixmaps"
diff --git a/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch b/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch
new file mode 100644
index 0000000000..be5d31d5e1
--- /dev/null
+++ b/recipes/iproute2/iproute2-2.6.29/use-cross-compiler.patch
@@ -0,0 +1,53 @@
+Patch to tc/Makefile is from
+http://bugs.gentoo.org/236861
+
+configure patch is based on suggestion from
+PR 5117
+
+http://bugs.openembedded.org/show_bug.cgi?id=5147
+
+-Khem
+Index: iproute2-2.6.29/configure
+===================================================================
+--- iproute2-2.6.29.orig/configure 2009-12-14 11:07:42.000000000 -0800
++++ iproute2-2.6.29/configure 2009-12-14 11:08:38.000000000 -0800
+@@ -16,7 +16,7 @@ int main(int argc, char **argv) {
+ return 0;
+ }
+ EOF
+-gcc -I$INCLUDE -o /tmp/atmtest /tmp/atmtest.c -latm >/dev/null 2>&1
++$CC -I$INCLUDE -o /tmp/atmtest /tmp/atmtest.c -latm >/dev/null 2>&1
+ if [ $? -eq 0 ]
+ then
+ echo "TC_CONFIG_ATM:=y" >>Config
+@@ -49,7 +49,7 @@ int main(int argc, char **argv) {
+ }
+
+ EOF
+-gcc -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
++$CC -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
+
+ if [ $? -eq 0 ]
+ then
+@@ -81,7 +81,7 @@ int main(int argc, char **argv) {
+ }
+
+ EOF
+-gcc -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
++$CC -I$INCLUDE $IPTC -o /tmp/ipttest /tmp/ipttest.c $IPTL -ldl >/dev/null 2>&1
+
+ if [ $? -eq 0 ]
+ then
+Index: iproute2-2.6.29/tc/Makefile
+===================================================================
+--- iproute2-2.6.29.orig/tc/Makefile 2009-12-14 11:10:27.000000000 -0800
++++ iproute2-2.6.29/tc/Makefile 2009-12-14 11:11:39.000000000 -0800
+@@ -100,7 +100,7 @@ clean:
+ rm -f emp_ematch.yacc.output
+
+ q_atm.so: q_atm.c
+- $(CC) $(CFLAGS) -shared -fpic -o q_atm.so q_atm.c -latm
++ $(CC) $(CFLAGS) $(LDFLAGS) -shared -fpic -o q_atm.so q_atm.c -latm
+
+ %.yacc.c: %.y
+ $(YACC) $(YACCFLAGS) -o $@ $<
diff --git a/recipes/iproute2/iproute2.inc b/recipes/iproute2/iproute2.inc
index 8c8519f230..9792bad4d6 100644
--- a/recipes/iproute2/iproute2.inc
+++ b/recipes/iproute2/iproute2.inc
@@ -4,6 +4,8 @@ SECTION = "base"
LICENSE = "GPL"
DEPENDS = "flex-native bison-native"
+INC_PR = "r3"
+
# Set the DATE in the .bb file
SRC_URI = "http://developer.osdl.org/dev/iproute2/download/${P}-${DATE}.tar.gz"
diff --git a/recipes/iproute2/iproute2_2.6.18.bb b/recipes/iproute2/iproute2_2.6.18.bb
index d442a091db..32e871118e 100644
--- a/recipes/iproute2/iproute2_2.6.18.bb
+++ b/recipes/iproute2/iproute2_2.6.18.bb
@@ -1,4 +1,4 @@
-PR = "r3"
+PR = "${INC_PR}.0"
require iproute2.inc
diff --git a/recipes/iproute2/iproute2_2.6.20.bb b/recipes/iproute2/iproute2_2.6.20.bb
index e72dfccdde..dd1a504142 100644
--- a/recipes/iproute2/iproute2_2.6.20.bb
+++ b/recipes/iproute2/iproute2_2.6.20.bb
@@ -1,6 +1,6 @@
require iproute2.inc
-PR = "r2"
+PR = "${INC_PR}.0"
DATE = "070313"
SRC_URI_append = " file://new-flex-fix.patch;patch=1 \
diff --git a/recipes/iproute2/iproute2_2.6.22.bb b/recipes/iproute2/iproute2_2.6.22.bb
index 1ee0ce7ab2..55e3a7575d 100644
--- a/recipes/iproute2/iproute2_2.6.22.bb
+++ b/recipes/iproute2/iproute2_2.6.22.bb
@@ -1,6 +1,6 @@
require iproute2.inc
-PR = "r1"
+PR = "${INC_PR}.0"
DATE = "070710"
SRC_URI_append = " file://new-flex-fix.patch;patch=1 \
diff --git a/recipes/iproute2/iproute2_2.6.29.bb b/recipes/iproute2/iproute2_2.6.29.bb
index d02573a359..d38dde880f 100644
--- a/recipes/iproute2/iproute2_2.6.29.bb
+++ b/recipes/iproute2/iproute2_2.6.29.bb
@@ -1,10 +1,11 @@
require iproute2.inc
-PR = "r1"
+PR = "${INC_PR}.0"
SRC_URI = "http://developer.osdl.org/dev/iproute2/download/${P}.tar.bz2 \
file://new-flex-fix.patch;patch=1 \
file://compilation-fix.patch;patch=1 \
+ file://use-cross-compiler.patch;patch=1 \
"
S = "${WORKDIR}/iproute2-${PV}"
diff --git a/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb b/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb
index 54e4601d20..1c2327f82e 100644
--- a/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb
+++ b/recipes/kexec/kexec-tools-klibc-static_2.0.1.bb
@@ -3,7 +3,7 @@ require kexec-tools2.inc
DEFAULT_PREFERENCE = "1"
-PR = "r2"
+PR = "r3"
DEPENDS = "klibc"
SRC_URI += "file://kexec-tools-2-headers.patch;patch=1 \
@@ -16,11 +16,6 @@ EXTRA_OECONF = " --without-zlib"
export CC=${TARGET_PREFIX}klcc
-# standart oe cflags don't work with klcc
-export CFLAGS=""
-export CPPFLAGS=""
-export LDFLAGS=""
-
PACKAGES =+ "kexec-klibc-static kdump-klibc-static"
FILES_kexec-klibc-static = "${sbindir}/kexec"
diff --git a/recipes/klibc/klibc-1.5.15/isystem.patch b/recipes/klibc/klibc-1.5.15/isystem.patch
new file mode 100644
index 0000000000..2ec40c16c2
--- /dev/null
+++ b/recipes/klibc/klibc-1.5.15/isystem.patch
@@ -0,0 +1,13 @@
+Index: klibc-1.5.15/klcc/klcc.in
+===================================================================
+--- klibc-1.5.15.orig/klcc/klcc.in 2009-12-14 00:32:41.373661102 +0100
++++ klibc-1.5.15/klcc/klcc.in 2009-12-14 00:34:20.855735356 +0100
+@@ -147,7 +147,7 @@
+ } elsif ( $a =~ /^-([fmwWQdO]|std=|ansi|pedantic|M[GPD]|MMD)/ ) {
+ # Options to gcc
+ push(@ccopt, $a);
+- } elsif ( $a =~ /^-([DUI]|M[FQT])(.*)$/ ) {
++ } elsif ( $a =~ /^-([DUI]|M[FQT]|isystem)(.*)$/ ) {
+ # Options to gcc, which can take either a conjoined argument
+ # (-DFOO) or a disjoint argument (-D FOO)
+ push(@ccopt, $a);
diff --git a/recipes/klibc/klibc_1.5.15.bb b/recipes/klibc/klibc_1.5.15.bb
index 777dcd2c3c..97898f2b8e 100644
--- a/recipes/klibc/klibc_1.5.15.bb
+++ b/recipes/klibc/klibc_1.5.15.bb
@@ -1,4 +1,4 @@
require klibc_1.5.15.inc
-PR = "r4"
+PR = "r5"
KLIBC_FETCHDIR = "Testing"
diff --git a/recipes/klibc/klibc_1.5.15.inc b/recipes/klibc/klibc_1.5.15.inc
index f6f5b879ef..6615ac2899 100644
--- a/recipes/klibc/klibc_1.5.15.inc
+++ b/recipes/klibc/klibc_1.5.15.inc
@@ -4,7 +4,8 @@ SRC_URI += "file://staging.patch;patch=1 \
file://klibc_kexecsyscall.patch;patch=1 \
file://mntproc-definitions.patch;patch=1 \
file://signal-cleanup.patch;patch=1 \
- "
+ file://isystem.patch;patch=1 \
+ "
# we want only the shared programms and the lib so we chose them manually
do_install() {
diff --git a/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch b/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch
new file mode 100644
index 0000000000..4871601c97
--- /dev/null
+++ b/recipes/linux/linux-2.6.20/0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch
@@ -0,0 +1,29 @@
+From fc31c7716355a226b8ed4e16f4581e5c8fa53570 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger <vapier@gentoo.org>
+Date: Thu, 17 May 2007 14:57:20 -0400
+Subject: [PATCH] kbuild: include limits.h in sumversion.c for PATH_MAX
+
+POSIX says limits.h defines PATH_MAX so we should include it (which fixes
+compiling on some systems like OS X).
+
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+---
+ scripts/mod/sumversion.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 6873d5a..d9cc690 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -7,6 +7,7 @@
+ #include <ctype.h>
+ #include <errno.h>
+ #include <string.h>
++#include <limits.h>
+ #include "modpost.h"
+
+ /*
+--
+1.6.3.3
+
diff --git a/recipes/linux/linux-2.6.22.6/mx31moboard/defconfig b/recipes/linux/linux-2.6.22/mx31moboard/defconfig
index 848ec6b4ce..848ec6b4ce 100644
--- a/recipes/linux/linux-2.6.22.6/mx31moboard/defconfig
+++ b/recipes/linux/linux-2.6.22/mx31moboard/defconfig
diff --git a/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch b/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch
deleted file mode 100644
index a2f6e1765e..0000000000
--- a/recipes/linux/linux-2.6.23/em-x270/01-prevent_loop_timespec_add_ns.patch
+++ /dev/null
@@ -1,19 +0,0 @@
----
- include/linux/time.h | 4 ++++
- 1 files changed, 4 insertions(+), 0 deletions(-)
-diff --git a/include/linux/time.h b/include/linux/time.h
-index 2091a19..d32ef0a 100644
---- a/include/linux/time.h
-+++ b/include/linux/time.h
-@@ -173,6 +173,10 @@ static inline void timespec_add_ns(struct timespec *a, u64 ns)
- {
- ns += a->tv_nsec;
- while(unlikely(ns >= NSEC_PER_SEC)) {
-+ /* The following asm() prevents the compiler from
-+ * optimising this loop into a modulo operation. */
-+ asm("" : "+r"(ns));
-+
- ns -= NSEC_PER_SEC;
- a->tv_sec++;
- }
-
diff --git a/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch b/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch
new file mode 100644
index 0000000000..77ee5c8f1d
--- /dev/null
+++ b/recipes/linux/linux-2.6.23/sched-cfs-v2.6.23.12-v24.1.patch
@@ -0,0 +1,8567 @@
+---
+ Documentation/sched-design-CFS.txt | 67 +
+ Makefile | 2
+ arch/i386/Kconfig | 11
+ drivers/kvm/kvm.h | 10
+ fs/pipe.c | 9
+ fs/proc/array.c | 21
+ fs/proc/base.c | 2
+ fs/proc/proc_misc.c | 15
+ include/linux/cgroup.h | 12
+ include/linux/cpuset.h | 5
+ include/linux/kernel.h | 7
+ include/linux/kernel_stat.h | 3
+ include/linux/nodemask.h | 94 +
+ include/linux/sched.h | 174 ++
+ include/linux/taskstats.h | 7
+ include/linux/topology.h | 5
+ init/Kconfig | 26
+ init/main.c | 3
+ kernel/delayacct.c | 8
+ kernel/exit.c | 6
+ kernel/fork.c | 5
+ kernel/ksysfs.c | 8
+ kernel/sched.c | 2310 +++++++++++++++++++++++--------------
+ kernel/sched_debug.c | 289 +++-
+ kernel/sched_fair.c | 885 ++++++--------
+ kernel/sched_idletask.c | 26
+ kernel/sched_rt.c | 54
+ kernel/sched_stats.h | 40
+ kernel/sysctl.c | 40
+ kernel/timer.c | 7
+ kernel/tsacct.c | 4
+ kernel/user.c | 249 +++
+ mm/memory_hotplug.c | 7
+ mm/page_alloc.c | 50
+ mm/vmscan.c | 4
+ net/unix/af_unix.c | 4
+ 36 files changed, 2883 insertions(+), 1586 deletions(-)
+
+--- linux-2.6.23.orig/Documentation/sched-design-CFS.txt
++++ linux-2.6.23/Documentation/sched-design-CFS.txt
+@@ -115,5 +115,72 @@ Some implementation details:
+ - reworked/sanitized SMP load-balancing: the runqueue-walking
+ assumptions are gone from the load-balancing code now, and
+ iterators of the scheduling modules are used. The balancing code got
+ quite a bit simpler as a result.
+
++
++Group scheduler extension to CFS
++================================
++
++Normally the scheduler operates on individual tasks and strives to provide
++fair CPU time to each task. Sometimes, it may be desirable to group tasks
++and provide fair CPU time to each such task group. For example, it may
++be desirable to first provide fair CPU time to each user on the system
++and then to each task belonging to a user.
++
++CONFIG_FAIR_GROUP_SCHED strives to achieve exactly that. It lets
++SCHED_NORMAL/BATCH tasks be be grouped and divides CPU time fairly among such
++groups. At present, there are two (mutually exclusive) mechanisms to group
++tasks for CPU bandwidth control purpose:
++
++ - Based on user id (CONFIG_FAIR_USER_SCHED)
++ In this option, tasks are grouped according to their user id.
++ - Based on "cgroup" pseudo filesystem (CONFIG_FAIR_CGROUP_SCHED)
++ This options lets the administrator create arbitrary groups
++ of tasks, using the "cgroup" pseudo filesystem. See
++ Documentation/cgroups.txt for more information about this
++ filesystem.
++
++Only one of these options to group tasks can be chosen and not both.
++
++Group scheduler tunables:
++
++When CONFIG_FAIR_USER_SCHED is defined, a directory is created in sysfs for
++each new user and a "cpu_share" file is added in that directory.
++
++ # cd /sys/kernel/uids
++ # cat 512/cpu_share # Display user 512's CPU share
++ 1024
++ # echo 2048 > 512/cpu_share # Modify user 512's CPU share
++ # cat 512/cpu_share # Display user 512's CPU share
++ 2048
++ #
++
++CPU bandwidth between two users are divided in the ratio of their CPU shares.
++For ex: if you would like user "root" to get twice the bandwidth of user
++"guest", then set the cpu_share for both the users such that "root"'s
++cpu_share is twice "guest"'s cpu_share
++
++
++When CONFIG_FAIR_CGROUP_SCHED is defined, a "cpu.shares" file is created
++for each group created using the pseudo filesystem. See example steps
++below to create task groups and modify their CPU share using the "cgroups"
++pseudo filesystem
++
++ # mkdir /dev/cpuctl
++ # mount -t cgroup -ocpu none /dev/cpuctl
++ # cd /dev/cpuctl
++
++ # mkdir multimedia # create "multimedia" group of tasks
++ # mkdir browser # create "browser" group of tasks
++
++ # #Configure the multimedia group to receive twice the CPU bandwidth
++ # #that of browser group
++
++ # echo 2048 > multimedia/cpu.shares
++ # echo 1024 > browser/cpu.shares
++
++ # firefox & # Launch firefox and move it to "browser" group
++ # echo <firefox_pid> > browser/tasks
++
++ # #Launch gmplayer (or your favourite movie player)
++ # echo <movie_player_pid> > multimedia/tasks
+--- linux-2.6.23.orig/Makefile
++++ linux-2.6.23/Makefile
+@@ -1,9 +1,9 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 23
+-EXTRAVERSION = .17
++EXTRAVERSION = .17-cfs-v24.1
+ NAME = Arr Matey! A Hairy Bilge Rat!
+
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+ # More info can be located in ./README
+--- linux-2.6.23.orig/arch/i386/Kconfig
++++ linux-2.6.23/arch/i386/Kconfig
+@@ -212,10 +212,21 @@ config X86_ES7000
+ Only choose this option if you have such a system, otherwise you
+ should say N here.
+
+ endchoice
+
++config SCHED_NO_NO_OMIT_FRAME_POINTER
++ bool "Single-depth WCHAN output"
++ default y
++ help
++ Calculate simpler /proc/<PID>/wchan values. If this option
++ is disabled then wchan values will recurse back to the
++ caller function. This provides more accurate wchan values,
++ at the expense of slightly more scheduling overhead.
++
++ If in doubt, say "Y".
++
+ config PARAVIRT
+ bool "Paravirtualization support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+--- linux-2.6.23.orig/drivers/kvm/kvm.h
++++ linux-2.6.23/drivers/kvm/kvm.h
+@@ -623,10 +623,20 @@ void __kvm_mmu_free_some_pages(struct kv
+ int kvm_mmu_load(struct kvm_vcpu *vcpu);
+ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+
+ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
++static inline void kvm_guest_enter(void)
++{
++ current->flags |= PF_VCPU;
++}
++
++static inline void kvm_guest_exit(void)
++{
++ current->flags &= ~PF_VCPU;
++}
++
+ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+ u32 error_code)
+ {
+ return vcpu->mmu.page_fault(vcpu, gva, error_code);
+ }
+--- linux-2.6.23.orig/fs/pipe.c
++++ linux-2.6.23/fs/pipe.c
+@@ -43,12 +43,11 @@ void pipe_wait(struct pipe_inode_info *p
+
+ /*
+ * Pipes are system-local resources, so sleeping on them
+ * is considered a noninteractive wait:
+ */
+- prepare_to_wait(&pipe->wait, &wait,
+- TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
++ prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
+ if (pipe->inode)
+ mutex_unlock(&pipe->inode->i_mutex);
+ schedule();
+ finish_wait(&pipe->wait, &wait);
+ if (pipe->inode)
+@@ -381,11 +380,11 @@ redo:
+ }
+ mutex_unlock(&inode->i_mutex);
+
+ /* Signal writers asynchronously that there is more room. */
+ if (do_wakeup) {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+ if (ret > 0)
+ file_accessed(filp);
+ return ret;
+@@ -554,11 +553,11 @@ redo2:
+ pipe->waiting_writers--;
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+ if (do_wakeup) {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ }
+ if (ret > 0)
+ file_update_time(filp);
+ return ret;
+@@ -648,11 +647,11 @@ pipe_release(struct inode *inode, int de
+ pipe->writers -= decw;
+
+ if (!pipe->readers && !pipe->writers) {
+ free_pipe_info(inode);
+ } else {
+- wake_up_interruptible(&pipe->wait);
++ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+ mutex_unlock(&inode->i_mutex);
+
+--- linux-2.6.23.orig/fs/proc/array.c
++++ linux-2.6.23/fs/proc/array.c
+@@ -365,15 +365,22 @@ static cputime_t task_stime(struct task_
+ * grows monotonically - apps rely on that):
+ */
+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+ cputime_to_clock_t(task_utime(p));
+
+- p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++ if (stime >= 0)
++ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++
+ return p->prev_stime;
+ }
+ #endif
+
++static cputime_t task_gtime(struct task_struct *p)
++{
++ return p->gtime;
++}
++
+ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
+ {
+ unsigned long vsize, eip, esp, wchan = ~0UL;
+ long priority, nice;
+ int tty_pgrp = -1, tty_nr = 0;
+@@ -385,10 +392,11 @@ static int do_task_stat(struct task_stru
+ struct mm_struct *mm;
+ unsigned long long start_time;
+ unsigned long cmin_flt = 0, cmaj_flt = 0;
+ unsigned long min_flt = 0, maj_flt = 0;
+ cputime_t cutime, cstime, utime, stime;
++ cputime_t cgtime, gtime;
+ unsigned long rsslim = 0;
+ char tcomm[sizeof(task->comm)];
+ unsigned long flags;
+
+ state = *get_task_state(task);
+@@ -403,10 +411,11 @@ static int do_task_stat(struct task_stru
+ get_task_comm(tcomm, task);
+
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+ cutime = cstime = utime = stime = cputime_zero;
++ cgtime = gtime = cputime_zero;
+
+ rcu_read_lock();
+ if (lock_task_sighand(task, &flags)) {
+ struct signal_struct *sig = task->signal;
+
+@@ -420,27 +429,30 @@ static int do_task_stat(struct task_stru
+
+ cmin_flt = sig->cmin_flt;
+ cmaj_flt = sig->cmaj_flt;
+ cutime = sig->cutime;
+ cstime = sig->cstime;
++ cgtime = sig->cgtime;
+ rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;
+
+ /* add up live thread stats at the group level */
+ if (whole) {
+ struct task_struct *t = task;
+ do {
+ min_flt += t->min_flt;
+ maj_flt += t->maj_flt;
+ utime = cputime_add(utime, task_utime(t));
+ stime = cputime_add(stime, task_stime(t));
++ gtime = cputime_add(gtime, task_gtime(t));
+ t = next_thread(t);
+ } while (t != task);
+
+ min_flt += sig->min_flt;
+ maj_flt += sig->maj_flt;
+ utime = cputime_add(utime, sig->utime);
+ stime = cputime_add(stime, sig->stime);
++ gtime = cputime_add(gtime, sig->gtime);
+ }
+
+ sid = signal_session(sig);
+ pgid = process_group(task);
+ ppid = rcu_dereference(task->real_parent)->tgid;
+@@ -454,10 +466,11 @@ static int do_task_stat(struct task_stru
+ if (!whole) {
+ min_flt = task->min_flt;
+ maj_flt = task->maj_flt;
+ utime = task_utime(task);
+ stime = task_stime(task);
++ gtime = task_gtime(task);
+ }
+
+ /* scale priority and nice values from timeslices to -20..20 */
+ /* to make it look like a "normal" Unix priority/nice value */
+ priority = task_prio(task);
+@@ -471,11 +484,11 @@ static int do_task_stat(struct task_stru
+ /* convert nsec -> ticks */
+ start_time = nsec_to_clock_t(start_time);
+
+ res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \
+ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
+-%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu\n",
++%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
+ task->pid,
+ tcomm,
+ state,
+ ppid,
+ pgid,
+@@ -516,11 +529,13 @@ static int do_task_stat(struct task_stru
+ 0UL,
+ task->exit_signal,
+ task_cpu(task),
+ task->rt_priority,
+ task->policy,
+- (unsigned long long)delayacct_blkio_ticks(task));
++ (unsigned long long)delayacct_blkio_ticks(task),
++ cputime_to_clock_t(gtime),
++ cputime_to_clock_t(cgtime));
+ if (mm)
+ mmput(mm);
+ return res;
+ }
+
+--- linux-2.6.23.orig/fs/proc/base.c
++++ linux-2.6.23/fs/proc/base.c
+@@ -302,11 +302,11 @@ static int proc_pid_wchan(struct task_st
+ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
+ {
+ return sprintf(buffer, "%llu %llu %lu\n",
+ task->sched_info.cpu_time,
+ task->sched_info.run_delay,
+- task->sched_info.pcnt);
++ task->sched_info.pcount);
+ }
+ #endif
+
+ /* The badness from the OOM killer */
+ unsigned long badness(struct task_struct *p, unsigned long uptime);
+--- linux-2.6.23.orig/fs/proc/proc_misc.c
++++ linux-2.6.23/fs/proc/proc_misc.c
+@@ -441,20 +441,22 @@ static const struct file_operations proc
+ static int show_stat(struct seq_file *p, void *v)
+ {
+ int i;
+ unsigned long jif;
+ cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
++ cputime64_t guest;
+ u64 sum = 0;
+ struct timespec boottime;
+ unsigned int *per_irq_sum;
+
+ per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
+ if (!per_irq_sum)
+ return -ENOMEM;
+
+ user = nice = system = idle = iowait =
+ irq = softirq = steal = cputime64_zero;
++ guest = cputime64_zero;
+ getboottime(&boottime);
+ jif = boottime.tv_sec;
+
+ for_each_possible_cpu(i) {
+ int j;
+@@ -465,26 +467,28 @@ static int show_stat(struct seq_file *p,
+ idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
+ iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
+ irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
+ softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
+ steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
++ guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+ for (j = 0; j < NR_IRQS; j++) {
+ unsigned int temp = kstat_cpu(i).irqs[j];
+ sum += temp;
+ per_irq_sum[j] += temp;
+ }
+ }
+
+- seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
++ seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ (unsigned long long)cputime64_to_clock_t(user),
+ (unsigned long long)cputime64_to_clock_t(nice),
+ (unsigned long long)cputime64_to_clock_t(system),
+ (unsigned long long)cputime64_to_clock_t(idle),
+ (unsigned long long)cputime64_to_clock_t(iowait),
+ (unsigned long long)cputime64_to_clock_t(irq),
+ (unsigned long long)cputime64_to_clock_t(softirq),
+- (unsigned long long)cputime64_to_clock_t(steal));
++ (unsigned long long)cputime64_to_clock_t(steal),
++ (unsigned long long)cputime64_to_clock_t(guest));
+ for_each_online_cpu(i) {
+
+ /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
+ user = kstat_cpu(i).cpustat.user;
+ nice = kstat_cpu(i).cpustat.nice;
+@@ -492,20 +496,23 @@ static int show_stat(struct seq_file *p,
+ idle = kstat_cpu(i).cpustat.idle;
+ iowait = kstat_cpu(i).cpustat.iowait;
+ irq = kstat_cpu(i).cpustat.irq;
+ softirq = kstat_cpu(i).cpustat.softirq;
+ steal = kstat_cpu(i).cpustat.steal;
+- seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
++ guest = kstat_cpu(i).cpustat.guest;
++ seq_printf(p,
++ "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ i,
+ (unsigned long long)cputime64_to_clock_t(user),
+ (unsigned long long)cputime64_to_clock_t(nice),
+ (unsigned long long)cputime64_to_clock_t(system),
+ (unsigned long long)cputime64_to_clock_t(idle),
+ (unsigned long long)cputime64_to_clock_t(iowait),
+ (unsigned long long)cputime64_to_clock_t(irq),
+ (unsigned long long)cputime64_to_clock_t(softirq),
+- (unsigned long long)cputime64_to_clock_t(steal));
++ (unsigned long long)cputime64_to_clock_t(steal),
++ (unsigned long long)cputime64_to_clock_t(guest));
+ }
+ seq_printf(p, "intr %llu", (unsigned long long)sum);
+
+ #ifndef CONFIG_SMP
+ /* Touches too many cache lines on SMP setups */
+--- /dev/null
++++ linux-2.6.23/include/linux/cgroup.h
+@@ -0,0 +1,12 @@
++#ifndef _LINUX_CGROUP_H
++#define _LINUX_CGROUP_H
++
++/*
++ * Control groups are not backported - we use a few compatibility
++ * defines to be able to use the upstream sched.c as-is:
++ */
++#define task_pid_nr(task) (task)->pid
++#define task_pid_vnr(task) (task)->pid
++#define find_task_by_vpid(pid) find_task_by_pid(pid)
++
++#endif
+--- linux-2.6.23.orig/include/linux/cpuset.h
++++ linux-2.6.23/include/linux/cpuset.h
+@@ -144,8 +144,13 @@ static inline int cpuset_do_slab_mem_spr
+ return 0;
+ }
+
+ static inline void cpuset_track_online_nodes(void) {}
+
++static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
++{
++ return cpu_possible_map;
++}
++
+ #endif /* !CONFIG_CPUSETS */
+
+ #endif /* _LINUX_CPUSET_H */
+--- linux-2.6.23.orig/include/linux/kernel.h
++++ linux-2.6.23/include/linux/kernel.h
+@@ -59,10 +59,17 @@ extern const char linux_proc_banner[];
+ #define KERN_WARNING "<4>" /* warning conditions */
+ #define KERN_NOTICE "<5>" /* normal but significant condition */
+ #define KERN_INFO "<6>" /* informational */
+ #define KERN_DEBUG "<7>" /* debug-level messages */
+
++/*
++ * Annotation for a "continued" line of log printout (only done after a
++ * line that had no enclosing \n). Only to be used by core/arch code
++ * during early bootup (a continued line is not SMP-safe otherwise).
++ */
++#define KERN_CONT ""
++
+ extern int console_printk[];
+
+ #define console_loglevel (console_printk[0])
+ #define default_message_loglevel (console_printk[1])
+ #define minimum_console_loglevel (console_printk[2])
+--- linux-2.6.23.orig/include/linux/kernel_stat.h
++++ linux-2.6.23/include/linux/kernel_stat.h
+@@ -21,10 +21,11 @@ struct cpu_usage_stat {
+ cputime64_t softirq;
+ cputime64_t irq;
+ cputime64_t idle;
+ cputime64_t iowait;
+ cputime64_t steal;
++ cputime64_t guest;
+ };
+
+ struct kernel_stat {
+ struct cpu_usage_stat cpustat;
+ unsigned int irqs[NR_IRQS];
+@@ -50,9 +51,11 @@ static inline int kstat_irqs(int irq)
+
+ return sum;
+ }
+
+ extern void account_user_time(struct task_struct *, cputime_t);
++extern void account_user_time_scaled(struct task_struct *, cputime_t);
+ extern void account_system_time(struct task_struct *, int, cputime_t);
++extern void account_system_time_scaled(struct task_struct *, cputime_t);
+ extern void account_steal_time(struct task_struct *, cputime_t);
+
+ #endif /* _LINUX_KERNEL_STAT_H */
+--- linux-2.6.23.orig/include/linux/nodemask.h
++++ linux-2.6.23/include/linux/nodemask.h
+@@ -336,46 +336,108 @@ static inline void __nodes_remap(nodemas
+ if (!nodes_empty(mask)) \
+ for ((node) = 0; (node) < 1; (node)++)
+ #endif /* MAX_NUMNODES */
+
+ /*
++ * Bitmasks that are kept for all the nodes.
++ */
++enum node_states {
++ N_POSSIBLE, /* The node could become online at some point */
++ N_ONLINE, /* The node is online */
++ N_NORMAL_MEMORY, /* The node has regular memory */
++#ifdef CONFIG_HIGHMEM
++ N_HIGH_MEMORY, /* The node has regular or high memory */
++#else
++ N_HIGH_MEMORY = N_NORMAL_MEMORY,
++#endif
++ N_CPU, /* The node has one or more cpus */
++ NR_NODE_STATES
++};
++
++/*
+ * The following particular system nodemasks and operations
+ * on them manage all possible and online nodes.
+ */
+
+-extern nodemask_t node_online_map;
+-extern nodemask_t node_possible_map;
++extern nodemask_t node_states[NR_NODE_STATES];
+
+ #if MAX_NUMNODES > 1
+-#define num_online_nodes() nodes_weight(node_online_map)
+-#define num_possible_nodes() nodes_weight(node_possible_map)
+-#define node_online(node) node_isset((node), node_online_map)
+-#define node_possible(node) node_isset((node), node_possible_map)
+-#define first_online_node first_node(node_online_map)
+-#define next_online_node(nid) next_node((nid), node_online_map)
++static inline int node_state(int node, enum node_states state)
++{
++ return node_isset(node, node_states[state]);
++}
++
++static inline void node_set_state(int node, enum node_states state)
++{
++ __node_set(node, &node_states[state]);
++}
++
++static inline void node_clear_state(int node, enum node_states state)
++{
++ __node_clear(node, &node_states[state]);
++}
++
++static inline int num_node_state(enum node_states state)
++{
++ return nodes_weight(node_states[state]);
++}
++
++#define for_each_node_state(__node, __state) \
++ for_each_node_mask((__node), node_states[__state])
++
++#define first_online_node first_node(node_states[N_ONLINE])
++#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
++
+ extern int nr_node_ids;
+ #else
+-#define num_online_nodes() 1
+-#define num_possible_nodes() 1
+-#define node_online(node) ((node) == 0)
+-#define node_possible(node) ((node) == 0)
++
++static inline int node_state(int node, enum node_states state)
++{
++ return node == 0;
++}
++
++static inline void node_set_state(int node, enum node_states state)
++{
++}
++
++static inline void node_clear_state(int node, enum node_states state)
++{
++}
++
++static inline int num_node_state(enum node_states state)
++{
++ return 1;
++}
++
++#define for_each_node_state(node, __state) \
++ for ( (node) = 0; (node) == 0; (node) = 1)
++
+ #define first_online_node 0
+ #define next_online_node(nid) (MAX_NUMNODES)
+ #define nr_node_ids 1
++
+ #endif
+
++#define node_online_map node_states[N_ONLINE]
++#define node_possible_map node_states[N_POSSIBLE]
++
+ #define any_online_node(mask) \
+ ({ \
+ int node; \
+ for_each_node_mask(node, (mask)) \
+ if (node_online(node)) \
+ break; \
+ node; \
+ })
+
+-#define node_set_online(node) set_bit((node), node_online_map.bits)
+-#define node_set_offline(node) clear_bit((node), node_online_map.bits)
++#define num_online_nodes() num_node_state(N_ONLINE)
++#define num_possible_nodes() num_node_state(N_POSSIBLE)
++#define node_online(node) node_state((node), N_ONLINE)
++#define node_possible(node) node_state((node), N_POSSIBLE)
++
++#define node_set_online(node) node_set_state((node), N_ONLINE)
++#define node_set_offline(node) node_clear_state((node), N_ONLINE)
+
+-#define for_each_node(node) for_each_node_mask((node), node_possible_map)
+-#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
++#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
++#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+
+ #endif /* __LINUX_NODEMASK_H */
+--- linux-2.6.23.orig/include/linux/sched.h
++++ linux-2.6.23/include/linux/sched.h
+@@ -1,10 +1,21 @@
+ #ifndef _LINUX_SCHED_H
+ #define _LINUX_SCHED_H
+
+ #include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
+
++/* backporting helper macro: */
++#define cpu_sibling_map(cpu) cpu_sibling_map[cpu]
++
++/*
++ * * Control groups are not backported - we use a few compatibility
++ * * defines to be able to use the upstream sched.c as-is:
++ * */
++#define task_pid_nr(task) (task)->pid
++#define task_pid_vnr(task) (task)->pid
++#define find_task_by_vpid(pid) find_task_by_pid(pid)
++
+ /*
+ * cloning flags:
+ */
+ #define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+ #define CLONE_VM 0x00000100 /* set if VM shared between processes */
+@@ -84,10 +95,11 @@ struct sched_param {
+ #include <linux/param.h>
+ #include <linux/resource.h>
+ #include <linux/timer.h>
+ #include <linux/hrtimer.h>
+ #include <linux/task_io_accounting.h>
++#include <linux/kobject.h>
+
+ #include <asm/processor.h>
+
+ struct exec_domain;
+ struct futex_pi_state;
+@@ -133,10 +145,11 @@ extern unsigned long nr_active(void);
+ extern unsigned long nr_iowait(void);
+ extern unsigned long weighted_cpuload(const int cpu);
+
+ struct seq_file;
+ struct cfs_rq;
++struct task_group;
+ #ifdef CONFIG_SCHED_DEBUG
+ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+ extern void proc_sched_set_task(struct task_struct *p);
+ extern void
+ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+@@ -171,12 +184,11 @@ print_cfs_rq(struct seq_file *m, int cpu
+ #define TASK_TRACED 8
+ /* in tsk->exit_state */
+ #define EXIT_ZOMBIE 16
+ #define EXIT_DEAD 32
+ /* in tsk->state again */
+-#define TASK_NONINTERACTIVE 64
+-#define TASK_DEAD 128
++#define TASK_DEAD 64
+
+ #define __set_task_state(tsk, state_value) \
+ do { (tsk)->state = (state_value); } while (0)
+ #define set_task_state(tsk, state_value) \
+ set_mb((tsk)->state, (state_value))
+@@ -276,10 +288,14 @@ static inline void touch_all_softlockup_
+ #endif
+
+
+ /* Attach to any functions which should be ignored in wchan output. */
+ #define __sched __attribute__((__section__(".sched.text")))
++
++/* Linker adds these: start and end of __sched functions */
++extern char __sched_text_start[], __sched_text_end[];
++
+ /* Is this address in the __sched functions? */
+ extern int in_sched_functions(unsigned long addr);
+
+ #define MAX_SCHEDULE_TIMEOUT LONG_MAX
+ extern signed long FASTCALL(schedule_timeout(signed long timeout));
+@@ -513,10 +529,12 @@ struct signal_struct {
+ * and for reaped dead child processes forked by this group.
+ * Live threads maintain their own counters and add to these
+ * in __exit_signal, except for the group leader.
+ */
+ cputime_t utime, stime, cutime, cstime;
++ cputime_t gtime;
++ cputime_t cgtime;
+ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
+
+ /*
+@@ -593,12 +611,27 @@ struct user_struct {
+ #endif
+
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ uid_t uid;
++
++#ifdef CONFIG_FAIR_USER_SCHED
++ struct task_group *tg;
++#ifdef CONFIG_SYSFS
++ struct kset kset;
++ struct subsys_attribute user_attr;
++ struct work_struct work;
++#endif
++#endif
+ };
+
++#ifdef CONFIG_FAIR_USER_SCHED
++extern int uids_kobject_init(void);
++#else
++static inline int uids_kobject_init(void) { return 0; }
++#endif
++
+ extern struct user_struct *find_user(uid_t);
+
+ extern struct user_struct root_user;
+ #define INIT_USER (&root_user)
+
+@@ -606,17 +639,21 @@ struct backing_dev_info;
+ struct reclaim_state;
+
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ struct sched_info {
+ /* cumulative counters */
+- unsigned long pcnt; /* # of times run on this cpu */
++ unsigned long pcount; /* # of times run on this cpu */
+ unsigned long long cpu_time, /* time spent on the cpu */
+ run_delay; /* time spent waiting on a runqueue */
+
+ /* timestamps */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
++#ifdef CONFIG_SCHEDSTATS
++ /* BKL stats */
++ unsigned int bkl_count;
++#endif
+ };
+ #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+
+ #ifdef CONFIG_SCHEDSTATS
+ extern const struct file_operations proc_schedstat_operations;
+@@ -747,43 +784,42 @@ struct sched_domain {
+ unsigned int balance_interval; /* initialise to 1. units in ms. */
+ unsigned int nr_balance_failed; /* initialise to 0 */
+
+ #ifdef CONFIG_SCHEDSTATS
+ /* load_balance() stats */
+- unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+- unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_count[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
++ unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
+
+ /* Active load balancing */
+- unsigned long alb_cnt;
+- unsigned long alb_failed;
+- unsigned long alb_pushed;
++ unsigned int alb_count;
++ unsigned int alb_failed;
++ unsigned int alb_pushed;
+
+ /* SD_BALANCE_EXEC stats */
+- unsigned long sbe_cnt;
+- unsigned long sbe_balanced;
+- unsigned long sbe_pushed;
++ unsigned int sbe_count;
++ unsigned int sbe_balanced;
++ unsigned int sbe_pushed;
+
+ /* SD_BALANCE_FORK stats */
+- unsigned long sbf_cnt;
+- unsigned long sbf_balanced;
+- unsigned long sbf_pushed;
++ unsigned int sbf_count;
++ unsigned int sbf_balanced;
++ unsigned int sbf_pushed;
+
+ /* try_to_wake_up() stats */
+- unsigned long ttwu_wake_remote;
+- unsigned long ttwu_move_affine;
+- unsigned long ttwu_move_balance;
++ unsigned int ttwu_wake_remote;
++ unsigned int ttwu_move_affine;
++ unsigned int ttwu_move_balance;
+ #endif
+ };
+
+-extern int partition_sched_domains(cpumask_t *partition1,
+- cpumask_t *partition2);
++extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+
+ #endif /* CONFIG_SMP */
+
+ /*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+@@ -851,27 +887,32 @@ struct uts_namespace;
+
+ struct rq;
+ struct sched_domain;
+
+ struct sched_class {
+- struct sched_class *next;
++ const struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+- void (*yield_task) (struct rq *rq, struct task_struct *p);
++ void (*yield_task) (struct rq *rq);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+
++#ifdef CONFIG_SMP
+ unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
+- struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
++ struct rq *busiest, unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio);
+
++ int (*move_one_task) (struct rq *this_rq, int this_cpu,
++ struct rq *busiest, struct sched_domain *sd,
++ enum cpu_idle_type idle);
++#endif
++
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
+ };
+
+@@ -885,46 +926,52 @@ struct load_weight {
+ * Current field usage histogram:
+ *
+ * 4 se->block_start
+ * 4 se->run_node
+ * 4 se->sleep_start
+- * 4 se->sleep_start_fair
+ * 6 se->load.weight
+- * 7 se->delta_fair
+- * 15 se->wait_runtime
+ */
+ struct sched_entity {
+- long wait_runtime;
+- unsigned long delta_fair_run;
+- unsigned long delta_fair_sleep;
+- unsigned long delta_exec;
+- s64 fair_key;
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
++ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+- u64 wait_start_fair;
+- u64 sleep_start_fair;
+
+ #ifdef CONFIG_SCHEDSTATS
+ u64 wait_start;
+ u64 wait_max;
+- s64 sum_wait_runtime;
+
+ u64 sleep_start;
+ u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
+ u64 block_max;
+ u64 exec_max;
++ u64 slice_max;
+
+- unsigned long wait_runtime_overruns;
+- unsigned long wait_runtime_underruns;
++ u64 nr_migrations;
++ u64 nr_migrations_cold;
++ u64 nr_failed_migrations_affine;
++ u64 nr_failed_migrations_running;
++ u64 nr_failed_migrations_hot;
++ u64 nr_forced_migrations;
++ u64 nr_forced2_migrations;
++
++ u64 nr_wakeups;
++ u64 nr_wakeups_sync;
++ u64 nr_wakeups_migrate;
++ u64 nr_wakeups_local;
++ u64 nr_wakeups_remote;
++ u64 nr_wakeups_affine;
++ u64 nr_wakeups_affine_attempts;
++ u64 nr_wakeups_passive;
++ u64 nr_wakeups_idle;
+ #endif
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+@@ -949,11 +996,11 @@ struct task_struct {
+ #endif
+ #endif
+
+ int prio, static_prio, normal_prio;
+ struct list_head run_list;
+- struct sched_class *sched_class;
++ const struct sched_class *sched_class;
+ struct sched_entity se;
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+@@ -1019,11 +1066,12 @@ struct task_struct {
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ unsigned int rt_priority;
+- cputime_t utime, stime;
++ cputime_t utime, stime, utimescaled, stimescaled;
++ cputime_t gtime;
+ cputime_t prev_utime, prev_stime;
+ unsigned long nvcsw, nivcsw; /* context switch counts */
+ struct timespec start_time; /* monotonic time */
+ struct timespec real_start_time; /* boot based time */
+ /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+@@ -1312,10 +1360,11 @@ static inline void put_task_struct(struc
+ #define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+ #define PF_STARTING 0x00000002 /* being created */
+ #define PF_EXITING 0x00000004 /* getting shut down */
+ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
++#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+ #define PF_DUMPCORE 0x00000200 /* dumped core */
+ #define PF_SIGNALED 0x00000400 /* killed by a signal */
+ #define PF_MEMALLOC 0x00000800 /* Allocating memory */
+@@ -1399,19 +1448,30 @@ extern void idle_task_exit(void);
+ static inline void idle_task_exit(void) {}
+ #endif
+
+ extern void sched_idle_next(void);
+
++#ifdef CONFIG_SCHED_DEBUG
+ extern unsigned int sysctl_sched_latency;
+ extern unsigned int sysctl_sched_min_granularity;
+ extern unsigned int sysctl_sched_wakeup_granularity;
+ extern unsigned int sysctl_sched_batch_wakeup_granularity;
+-extern unsigned int sysctl_sched_stat_granularity;
+-extern unsigned int sysctl_sched_runtime_limit;
+-extern unsigned int sysctl_sched_compat_yield;
+ extern unsigned int sysctl_sched_child_runs_first;
+ extern unsigned int sysctl_sched_features;
++extern unsigned int sysctl_sched_migration_cost;
++extern unsigned int sysctl_sched_nr_migrate;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++extern unsigned int sysctl_sched_min_bal_int_shares;
++extern unsigned int sysctl_sched_max_bal_int_shares;
++#endif
++
++int sched_nr_latency_handler(struct ctl_table *table, int write,
++ struct file *file, void __user *buffer, size_t *length,
++ loff_t *ppos);
++#endif
++
++extern unsigned int sysctl_sched_compat_yield;
+
+ #ifdef CONFIG_RT_MUTEXES
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+@@ -1841,10 +1901,22 @@ extern long sched_getaffinity(pid_t pid,
+
+ extern int sched_mc_power_savings, sched_smt_power_savings;
+
+ extern void normalize_rt_tasks(void);
+
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++extern struct task_group init_task_group;
++
++extern struct task_group *sched_create_group(void);
++extern void sched_destroy_group(struct task_group *tg);
++extern void sched_move_task(struct task_struct *tsk);
++extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
++extern unsigned long sched_group_shares(struct task_group *tg);
++
++#endif
++
+ #ifdef CONFIG_TASK_XACCT
+ static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+ {
+ tsk->rchar += amt;
+ }
+@@ -1879,8 +1951,16 @@ static inline void inc_syscr(struct task
+ static inline void inc_syscw(struct task_struct *tsk)
+ {
+ }
+ #endif
+
++#ifdef CONFIG_SMP
++void migration_init(void);
++#else
++static inline void migration_init(void)
++{
++}
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif
+--- linux-2.6.23.orig/include/linux/taskstats.h
++++ linux-2.6.23/include/linux/taskstats.h
+@@ -29,11 +29,11 @@
+ * b) add comment indicating new version number at end of struct
+ * c) add new fields after version comment; maintain 64-bit alignment
+ */
+
+
+-#define TASKSTATS_VERSION 5
++#define TASKSTATS_VERSION 6
+ #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
+ * in linux/sched.h */
+
+ struct taskstats {
+
+@@ -150,10 +150,15 @@ struct taskstats {
+ __u64 write_bytes; /* bytes of write I/O */
+ __u64 cancelled_write_bytes; /* bytes of cancelled write I/O */
+
+ __u64 nvcsw; /* voluntary_ctxt_switches */
+ __u64 nivcsw; /* nonvoluntary_ctxt_switches */
++
++ /* time accounting for SMT machines */
++ __u64 ac_utimescaled; /* utime scaled on frequency etc */
++ __u64 ac_stimescaled; /* stime scaled on frequency etc */
++ __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
+ };
+
+
+ /*
+ * Commands sent from userspace
+--- linux-2.6.23.orig/include/linux/topology.h
++++ linux-2.6.23/include/linux/topology.h
+@@ -157,19 +157,18 @@
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+- .idle_idx = 0, \
+- .newidle_idx = 0, \
++ .idle_idx = 1, \
++ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_NEWIDLE \
+ | SD_BALANCE_EXEC \
+ | SD_WAKE_AFFINE \
+- | SD_WAKE_IDLE \
+ | BALANCE_FOR_PKG_POWER,\
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+ }
+--- linux-2.6.23.orig/init/Kconfig
++++ linux-2.6.23/init/Kconfig
+@@ -271,18 +271,44 @@ config LOG_BUF_SHIFT
+ 12 => 4 KB
+
+ config CPUSETS
+ bool "Cpuset support"
+ depends on SMP
++ #
++ # disabled for now - depends on control groups, which
++ # are hard to backport:
++ #
++ depends on 0
+ help
+ This option will let you create and manage CPUSETs which
+ allow dynamically partitioning a system into sets of CPUs and
+ Memory Nodes and assigning tasks to run only within those sets.
+ This is primarily useful on large SMP or NUMA systems.
+
+ Say N if unsure.
+
++config FAIR_GROUP_SCHED
++ bool "Fair group CPU scheduler"
++ default y
++ depends on EXPERIMENTAL
++ help
++ This feature lets the CPU scheduler recognize task groups and control
++ CPU bandwidth allocation to such task groups.
++
++choice
++ depends on FAIR_GROUP_SCHED
++ prompt "Basis for grouping tasks"
++ default FAIR_USER_SCHED
++
++config FAIR_USER_SCHED
++ bool "user id"
++ help
++ This option will choose userid as the basis for grouping
++ tasks, thus providing equal CPU bandwidth to each user.
++
++endchoice
++
+ config SYSFS_DEPRECATED
+ bool "Create deprecated sysfs files"
+ default y
+ help
+ This option creates deprecated symlinks such as the
+--- linux-2.6.23.orig/init/main.c
++++ linux-2.6.23/init/main.c
+@@ -750,15 +750,12 @@ static int __init nosoftlockup_setup(cha
+ __setup("nosoftlockup", nosoftlockup_setup);
+
+ static void __init do_pre_smp_initcalls(void)
+ {
+ extern int spawn_ksoftirqd(void);
+-#ifdef CONFIG_SMP
+- extern int migration_init(void);
+
+ migration_init();
+-#endif
+ spawn_ksoftirqd();
+ if (!nosoftlockup)
+ spawn_softlockup_task();
+ }
+
+--- linux-2.6.23.orig/kernel/delayacct.c
++++ linux-2.6.23/kernel/delayacct.c
+@@ -113,15 +113,21 @@ int __delayacct_add_tsk(struct taskstats
+ tmp = (s64)d->cpu_run_real_total;
+ cputime_to_timespec(tsk->utime + tsk->stime, &ts);
+ tmp += timespec_to_ns(&ts);
+ d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
+
++ tmp = (s64)d->cpu_scaled_run_real_total;
++ cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
++ tmp += timespec_to_ns(&ts);
++ d->cpu_scaled_run_real_total =
++ (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
++
+ /*
+ * No locking available for sched_info (and too expensive to add one)
+ * Mitigate by taking snapshot of values
+ */
+- t1 = tsk->sched_info.pcnt;
++ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+ t3 = tsk->sched_info.cpu_time;
+
+ d->cpu_count += t1;
+
+--- linux-2.6.23.orig/kernel/exit.c
++++ linux-2.6.23/kernel/exit.c
+@@ -109,10 +109,11 @@ static void __exit_signal(struct task_st
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
++ sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->inblock += task_io_get_inblock(tsk);
+@@ -1240,10 +1241,15 @@ static int wait_task_zombie(struct task_
+ psig->cstime =
+ cputime_add(psig->cstime,
+ cputime_add(p->stime,
+ cputime_add(sig->stime,
+ sig->cstime)));
++ psig->cgtime =
++ cputime_add(psig->cgtime,
++ cputime_add(p->gtime,
++ cputime_add(sig->gtime,
++ sig->cgtime)));
+ psig->cmin_flt +=
+ p->min_flt + sig->min_flt + sig->cmin_flt;
+ psig->cmaj_flt +=
+ p->maj_flt + sig->maj_flt + sig->cmaj_flt;
+ psig->cnvcsw +=
+--- linux-2.6.23.orig/kernel/fork.c
++++ linux-2.6.23/kernel/fork.c
+@@ -875,10 +875,12 @@ static inline int copy_signal(unsigned l
+
+ sig->leader = 0; /* session leadership doesn't inherit */
+ sig->tty_old_pgrp = NULL;
+
+ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
++ sig->gtime = cputime_zero;
++ sig->cgtime = cputime_zero;
+ sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
+ sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+ sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+ sig->sum_sched_runtime = 0;
+ INIT_LIST_HEAD(&sig->cpu_timers[0]);
+@@ -1045,10 +1047,13 @@ static struct task_struct *copy_process(
+
+ p->utime = cputime_zero;
+ p->stime = cputime_zero;
+ p->prev_utime = cputime_zero;
+ p->prev_stime = cputime_zero;
++ p->gtime = cputime_zero;
++ p->utimescaled = cputime_zero;
++ p->stimescaled = cputime_zero;
+
+ #ifdef CONFIG_TASK_XACCT
+ p->rchar = 0; /* I/O counter: bytes read */
+ p->wchar = 0; /* I/O counter: bytes written */
+ p->syscr = 0; /* I/O counter: read syscalls */
+--- linux-2.6.23.orig/kernel/ksysfs.c
++++ linux-2.6.23/kernel/ksysfs.c
+@@ -12,10 +12,11 @@
+ #include <linux/string.h>
+ #include <linux/sysfs.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/kexec.h>
++#include <linux/sched.h>
+
+ #define KERNEL_ATTR_RO(_name) \
+ static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
+
+ #define KERNEL_ATTR_RW(_name) \
+@@ -114,9 +115,16 @@ static int __init ksysfs_init(void)
+ notes_attr.size = notes_size;
+ error = sysfs_create_bin_file(&kernel_subsys.kobj,
+ &notes_attr);
+ }
+
++ /*
++ * Create "/sys/kernel/uids" directory and corresponding root user's
++ * directory under it.
++ */
++ if (!error)
++ error = uids_kobject_init();
++
+ return error;
+ }
+
+ core_initcall(ksysfs_init);
+--- linux-2.6.23.orig/kernel/sched.c
++++ linux-2.6.23/kernel/sched.c
+@@ -42,10 +42,11 @@
+ #include <linux/profile.h>
+ #include <linux/freezer.h>
+ #include <linux/vmalloc.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
++#include <linux/pid_namespace.h>
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+ #include <linux/timer.h>
+ #include <linux/rcupdate.h>
+ #include <linux/cpu.h>
+@@ -59,21 +60,23 @@
+ #include <linux/tsacct_kern.h>
+ #include <linux/kprobes.h>
+ #include <linux/delayacct.h>
+ #include <linux/reciprocal_div.h>
+ #include <linux/unistd.h>
++#include <linux/pagemap.h>
+
+ #include <asm/tlb.h>
++#include <asm/irq_regs.h>
+
+ /*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+ unsigned long long __attribute__((weak)) sched_clock(void)
+ {
+- return (unsigned long long)jiffies * (1000000000 / HZ);
++ return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+ }
+
+ /*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+@@ -93,24 +96,22 @@ unsigned long long __attribute__((weak))
+ #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+ /*
+ * Some helpers for converting nanosecond timing to jiffy resolution
+ */
+-#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
+-#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
++#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
++#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ))
+
+ #define NICE_0_LOAD SCHED_LOAD_SCALE
+ #define NICE_0_SHIFT SCHED_LOAD_SHIFT
+
+ /*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
+- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
++ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+-#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
+ #define DEF_TIMESLICE (100 * HZ / 1000)
+
+ #ifdef CONFIG_SMP
+ /*
+ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
+@@ -130,28 +131,10 @@ static inline void sg_inc_cpu_power(stru
+ sg->__cpu_power += val;
+ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
+ }
+ #endif
+
+-#define SCALE_PRIO(x, prio) \
+- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
+-
+-/*
+- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+- * to time slice values: [800ms ... 100ms ... 5ms]
+- */
+-static unsigned int static_prio_timeslice(int static_prio)
+-{
+- if (static_prio == NICE_TO_PRIO(19))
+- return 1;
+-
+- if (static_prio < NICE_TO_PRIO(0))
+- return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
+- else
+- return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+-}
+-
+ static inline int rt_policy(int policy)
+ {
+ if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+ return 1;
+ return 0;
+@@ -168,45 +151,115 @@ static inline int task_has_rt_policy(str
+ struct rt_prio_array {
+ DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+ struct list_head queue[MAX_RT_PRIO];
+ };
+
+-struct load_stat {
+- struct load_weight load;
+- u64 load_update_start, load_update_last;
+- unsigned long delta_fair, delta_exec, delta_stat;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++#include <linux/cgroup.h>
++
++struct cfs_rq;
++
++/* task group related information */
++struct task_group {
++#ifdef CONFIG_FAIR_CGROUP_SCHED
++ struct cgroup_subsys_state css;
++#endif
++ /* schedulable entities of this group on each cpu */
++ struct sched_entity **se;
++ /* runqueue "owned" by this group on each cpu */
++ struct cfs_rq **cfs_rq;
++ unsigned long shares;
++ /* spinlock to serialize modification to shares */
++ spinlock_t lock;
++ struct rcu_head rcu;
++};
++
++/* Default task group's sched entity on each cpu */
++static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
++/* Default task group's cfs_rq on each cpu */
++static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
++
++static struct sched_entity *init_sched_entity_p[NR_CPUS];
++static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
++
++/* Default task group.
++ * Every task in system belong to this group at bootup.
++ */
++struct task_group init_task_group = {
++ .se = init_sched_entity_p,
++ .cfs_rq = init_cfs_rq_p,
+ };
+
++#ifdef CONFIG_FAIR_USER_SCHED
++# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
++#else
++# define INIT_TASK_GRP_LOAD NICE_0_LOAD
++#endif
++
++static int init_task_group_load = INIT_TASK_GRP_LOAD;
++
++/* return group to which a task belongs */
++static inline struct task_group *task_group(struct task_struct *p)
++{
++ struct task_group *tg;
++
++#ifdef CONFIG_FAIR_USER_SCHED
++ tg = p->user->tg;
++#elif defined(CONFIG_FAIR_CGROUP_SCHED)
++ tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
++ struct task_group, css);
++#else
++ tg = &init_task_group;
++#endif
++ return tg;
++}
++
++/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
++static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
++{
++ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
++ p->se.parent = task_group(p)->se[cpu];
++}
++
++#else
++
++static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
+ /* CFS-related fields in a runqueue */
+ struct cfs_rq {
+ struct load_weight load;
+ unsigned long nr_running;
+
+- s64 fair_clock;
+ u64 exec_clock;
+- s64 wait_runtime;
+- u64 sleeper_bonus;
+- unsigned long wait_runtime_overruns, wait_runtime_underruns;
++ u64 min_vruntime;
+
+ struct rb_root tasks_timeline;
+ struct rb_node *rb_leftmost;
+ struct rb_node *rb_load_balance_curr;
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* 'curr' points to currently running entity on this cfs_rq.
+ * It is set to NULL otherwise (i.e when none are currently running).
+ */
+ struct sched_entity *curr;
++
++ unsigned long nr_spread_over;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
+
+- /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
++ /*
++ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+ * (like users, containers etc.)
+ *
+ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+ * list is used during load balance.
+ */
+- struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
++ struct list_head leaf_cfs_rq_list;
++ struct task_group *tg; /* group that "owns" this runqueue */
+ #endif
+ };
+
+ /* Real-Time classes' related field in a runqueue: */
+ struct rt_rq {
+@@ -221,11 +274,12 @@ struct rt_rq {
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+ struct rq {
+- spinlock_t lock; /* runqueue lock */
++ /* runqueue lock: */
++ spinlock_t lock;
+
+ /*
+ * nr_running and cpu_load should be in the same cacheline because
+ * remote CPUs use both these fields when doing load calculation.
+ */
+@@ -234,19 +288,21 @@ struct rq {
+ unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+ unsigned char idle_at_tick;
+ #ifdef CONFIG_NO_HZ
+ unsigned char in_nohz_recently;
+ #endif
+- struct load_stat ls; /* capture load from *all* tasks on this cpu */
++ /* capture load from *all* tasks on this cpu: */
++ struct load_weight load;
+ unsigned long nr_load_updates;
+ u64 nr_switches;
+
+ struct cfs_rq cfs;
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+- struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
++ /* list of leaf cfs_rq on this cpu: */
++ struct list_head leaf_cfs_rq_list;
+ #endif
+- struct rt_rq rt;
++ struct rt_rq rt;
+
+ /*
+ * This is part of a global counter where only the total sum
+ * over all CPUs matters. A task can increase this counter on
+ * one CPU and if it got migrated afterwards it may decrease
+@@ -272,34 +328,38 @@ struct rq {
+ struct sched_domain *sd;
+
+ /* For active balancing */
+ int active_balance;
+ int push_cpu;
+- int cpu; /* cpu of this runqueue */
++ /* cpu of this runqueue: */
++ int cpu;
+
+ struct task_struct *migration_thread;
+ struct list_head migration_queue;
+ #endif
+
+ #ifdef CONFIG_SCHEDSTATS
+ /* latency stats */
+ struct sched_info rq_sched_info;
+
+ /* sys_sched_yield() stats */
+- unsigned long yld_exp_empty;
+- unsigned long yld_act_empty;
+- unsigned long yld_both_empty;
+- unsigned long yld_cnt;
++ unsigned int yld_exp_empty;
++ unsigned int yld_act_empty;
++ unsigned int yld_both_empty;
++ unsigned int yld_count;
+
+ /* schedule() stats */
+- unsigned long sched_switch;
+- unsigned long sched_cnt;
+- unsigned long sched_goidle;
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+- unsigned long ttwu_cnt;
+- unsigned long ttwu_local;
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++
++ /* BKL stats */
++ unsigned int bkl_count;
+ #endif
+ struct lock_class_key rq_lock_key;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+@@ -380,10 +440,45 @@ static void update_rq_clock(struct rq *r
+ #define this_rq() (&__get_cpu_var(runqueues))
+ #define task_rq(p) cpu_rq(task_cpu(p))
+ #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+
+ /*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug static const
++#endif
++
++/*
++ * Debugging: various feature bits
++ */
++enum {
++ SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
++ SCHED_FEAT_WAKEUP_PREEMPT = 2,
++ SCHED_FEAT_START_DEBIT = 4,
++ SCHED_FEAT_TREE_AVG = 8,
++ SCHED_FEAT_APPROX_AVG = 16,
++};
++
++const_debug unsigned int sysctl_sched_features =
++ SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
++ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
++ SCHED_FEAT_START_DEBIT * 1 |
++ SCHED_FEAT_TREE_AVG * 0 |
++ SCHED_FEAT_APPROX_AVG * 0;
++
++#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
++
++/*
++ * Number of tasks to iterate in a single balance run.
++ * Limited because this is done with IRQs disabled.
++ */
++const_debug unsigned int sysctl_sched_nr_migrate = 32;
++
++/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+ unsigned long long cpu_clock(int cpu)
+ {
+@@ -391,40 +486,39 @@ unsigned long long cpu_clock(int cpu)
+ unsigned long flags;
+ struct rq *rq;
+
+ local_irq_save(flags);
+ rq = cpu_rq(cpu);
+- update_rq_clock(rq);
++ /*
++ * Only call sched_clock() if the scheduler has already been
++ * initialized (some code might call cpu_clock() very early):
++ */
++ if (rq->idle)
++ update_rq_clock(rq);
+ now = rq->clock;
+ local_irq_restore(flags);
+
+ return now;
+ }
+-
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+-/* Change a task's ->cfs_rq if it moves across CPUs */
+-static inline void set_task_cfs_rq(struct task_struct *p)
+-{
+- p->se.cfs_rq = &task_rq(p)->cfs;
+-}
+-#else
+-static inline void set_task_cfs_rq(struct task_struct *p)
+-{
+-}
+-#endif
++EXPORT_SYMBOL_GPL(cpu_clock);
+
+ #ifndef prepare_arch_switch
+ # define prepare_arch_switch(next) do { } while (0)
+ #endif
+ #ifndef finish_arch_switch
+ # define finish_arch_switch(prev) do { } while (0)
+ #endif
+
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
+ #ifndef __ARCH_WANT_UNLOCKED_CTXSW
+ static inline int task_running(struct rq *rq, struct task_struct *p)
+ {
+- return rq->curr == p;
++ return task_current(rq, p);
+ }
+
+ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+ {
+ }
+@@ -449,11 +543,11 @@ static inline void finish_lock_switch(st
+ static inline int task_running(struct rq *rq, struct task_struct *p)
+ {
+ #ifdef CONFIG_SMP
+ return p->oncpu;
+ #else
+- return rq->curr == p;
++ return task_current(rq, p);
+ #endif
+ }
+
+ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+ {
+@@ -494,44 +588,40 @@ static inline void finish_lock_switch(st
+ * Must be called interrupts disabled.
+ */
+ static inline struct rq *__task_rq_lock(struct task_struct *p)
+ __acquires(rq->lock)
+ {
+- struct rq *rq;
+-
+-repeat_lock_task:
+- rq = task_rq(p);
+- spin_lock(&rq->lock);
+- if (unlikely(rq != task_rq(p))) {
++ for (;;) {
++ struct rq *rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
+ spin_unlock(&rq->lock);
+- goto repeat_lock_task;
+ }
+- return rq;
+ }
+
+ /*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+- * interrupts. Note the ordering: we can safely lookup the task_rq without
++ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(rq->lock)
+ {
+ struct rq *rq;
+
+-repeat_lock_task:
+- local_irq_save(*flags);
+- rq = task_rq(p);
+- spin_lock(&rq->lock);
+- if (unlikely(rq != task_rq(p))) {
++ for (;;) {
++ local_irq_save(*flags);
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ return rq;
+ spin_unlock_irqrestore(&rq->lock, *flags);
+- goto repeat_lock_task;
+ }
+- return rq;
+ }
+
+-static inline void __task_rq_unlock(struct rq *rq)
++static void __task_rq_unlock(struct rq *rq)
+ __releases(rq->lock)
+ {
+ spin_unlock(&rq->lock);
+ }
+
+@@ -542,11 +632,11 @@ static inline void task_rq_unlock(struct
+ }
+
+ /*
+ * this_rq_lock - lock this runqueue and disable interrupts.
+ */
+-static inline struct rq *this_rq_lock(void)
++static struct rq *this_rq_lock(void)
+ __acquires(rq->lock)
+ {
+ struct rq *rq;
+
+ local_irq_disable();
+@@ -576,10 +666,11 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep
+ void sched_clock_idle_wakeup_event(u64 delta_ns)
+ {
+ struct rq *rq = cpu_rq(smp_processor_id());
+ u64 now = sched_clock();
+
++ touch_softlockup_watchdog();
+ rq->idle_clock += delta_ns;
+ /*
+ * Override the previous timestamp and ignore all
+ * sched_clock() deltas that occured while we idled,
+ * and use the PM-provided delta_ns to advance the
+@@ -642,23 +733,10 @@ static inline void resched_task(struct t
+ assert_spin_locked(&task_rq(p)->lock);
+ set_tsk_need_resched(p);
+ }
+ #endif
+
+-static u64 div64_likely32(u64 divident, unsigned long divisor)
+-{
+-#if BITS_PER_LONG == 32
+- if (likely(divident <= 0xffffffffULL))
+- return (u32)divident / divisor;
+- do_div(divident, divisor);
+-
+- return divident;
+-#else
+- return divident / divisor;
+-#endif
+-}
+-
+ #if BITS_PER_LONG == 32
+ # define WMULT_CONST (~0UL)
+ #else
+ # define WMULT_CONST (1UL << 32)
+ #endif
+@@ -696,27 +774,25 @@ static inline unsigned long
+ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+ {
+ return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+ }
+
+-static void update_load_add(struct load_weight *lw, unsigned long inc)
++static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+ {
+ lw->weight += inc;
+- lw->inv_weight = 0;
+ }
+
+-static void update_load_sub(struct load_weight *lw, unsigned long dec)
++static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+ {
+ lw->weight -= dec;
+- lw->inv_weight = 0;
+ }
+
+ /*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
++ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+ #define WEIGHT_IDLEPRIO 2
+@@ -774,76 +850,62 @@ struct rq_iterator {
+ void *arg;
+ struct task_struct *(*start)(void *);
+ struct task_struct *(*next)(void *);
+ };
+
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator);
++#ifdef CONFIG_SMP
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator);
++
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator);
++#endif
++
++#ifdef CONFIG_CGROUP_CPUACCT
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
++#else
++static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
++#endif
+
+ #include "sched_stats.h"
+-#include "sched_rt.c"
+-#include "sched_fair.c"
+ #include "sched_idletask.c"
++#include "sched_fair.c"
++#include "sched_rt.c"
+ #ifdef CONFIG_SCHED_DEBUG
+ # include "sched_debug.c"
+ #endif
+
+ #define sched_class_highest (&rt_sched_class)
+
+-static void __update_curr_load(struct rq *rq, struct load_stat *ls)
+-{
+- if (rq->curr != rq->idle && ls->load.weight) {
+- ls->delta_exec += ls->delta_stat;
+- ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
+- ls->delta_stat = 0;
+- }
+-}
+-
+ /*
+ * Update delta_exec, delta_fair fields for rq.
+ *
+ * delta_fair clock advances at a rate inversely proportional to
+- * total load (rq->ls.load.weight) on the runqueue, while
++ * total load (rq->load.weight) on the runqueue, while
+ * delta_exec advances at the same rate as wall-clock (provided
+ * cpu is not idle).
+ *
+ * delta_exec / delta_fair is a measure of the (smoothened) load on this
+ * runqueue over any given interval. This (smoothened) load is used
+ * during load balance.
+ *
+- * This function is called /before/ updating rq->ls.load
++ * This function is called /before/ updating rq->load
+ * and when switching tasks.
+ */
+-static void update_curr_load(struct rq *rq)
+-{
+- struct load_stat *ls = &rq->ls;
+- u64 start;
+-
+- start = ls->load_update_start;
+- ls->load_update_start = rq->clock;
+- ls->delta_stat += rq->clock - start;
+- /*
+- * Stagger updates to ls->delta_fair. Very frequent updates
+- * can be expensive.
+- */
+- if (ls->delta_stat >= sysctl_sched_stat_granularity)
+- __update_curr_load(rq, ls);
+-}
+-
+ static inline void inc_load(struct rq *rq, const struct task_struct *p)
+ {
+- update_curr_load(rq);
+- update_load_add(&rq->ls.load, p->se.load.weight);
++ update_load_add(&rq->load, p->se.load.weight);
+ }
+
+ static inline void dec_load(struct rq *rq, const struct task_struct *p)
+ {
+- update_curr_load(rq);
+- update_load_sub(&rq->ls.load, p->se.load.weight);
++ update_load_sub(&rq->load, p->se.load.weight);
+ }
+
+ static void inc_nr_running(struct task_struct *p, struct rq *rq)
+ {
+ rq->nr_running++;
+@@ -856,12 +918,10 @@ static void dec_nr_running(struct task_s
+ dec_load(rq, p);
+ }
+
+ static void set_load_weight(struct task_struct *p)
+ {
+- p->se.wait_runtime = 0;
+-
+ if (task_has_rt_policy(p)) {
+ p->se.load.weight = prio_to_weight[0] * 2;
+ p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+ return;
+ }
+@@ -949,24 +1009,10 @@ static void activate_task(struct rq *rq,
+ enqueue_task(rq, p, wakeup);
+ inc_nr_running(p, rq);
+ }
+
+ /*
+- * activate_idle_task - move idle task to the _front_ of runqueue.
+- */
+-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
+-{
+- update_rq_clock(rq);
+-
+- if (p->state == TASK_UNINTERRUPTIBLE)
+- rq->nr_uninterruptible--;
+-
+- enqueue_task(rq, p, 0);
+- inc_nr_running(p, rq);
+-}
+-
+-/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+ {
+ if (p->state == TASK_UNINTERRUPTIBLE)
+@@ -986,45 +1032,76 @@ inline int task_curr(const struct task_s
+ }
+
+ /* Used instead of source_load when we know the type == 0 */
+ unsigned long weighted_cpuload(const int cpu)
+ {
+- return cpu_rq(cpu)->ls.load.weight;
++ return cpu_rq(cpu)->load.weight;
+ }
+
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
++ set_task_cfs_rq(p, cpu);
+ #ifdef CONFIG_SMP
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfuly executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
+ task_thread_info(p)->cpu = cpu;
+- set_task_cfs_rq(p);
+ #endif
+ }
+
+ #ifdef CONFIG_SMP
+
++/*
++ * Is this task likely cache-hot:
++ */
++static inline int
++task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
++{
++ s64 delta;
++
++ if (p->sched_class != &fair_sched_class)
++ return 0;
++
++ if (sysctl_sched_migration_cost == -1)
++ return 1;
++ if (sysctl_sched_migration_cost == 0)
++ return 0;
++
++ delta = now - p->se.exec_start;
++
++ return delta < (s64)sysctl_sched_migration_cost;
++}
++
++
+ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ {
+ int old_cpu = task_cpu(p);
+ struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
+- u64 clock_offset, fair_clock_offset;
++ struct cfs_rq *old_cfsrq = task_cfs_rq(p),
++ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
++ u64 clock_offset;
+
+ clock_offset = old_rq->clock - new_rq->clock;
+- fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
+-
+- if (p->se.wait_start_fair)
+- p->se.wait_start_fair -= fair_clock_offset;
+- if (p->se.sleep_start_fair)
+- p->se.sleep_start_fair -= fair_clock_offset;
+
+ #ifdef CONFIG_SCHEDSTATS
+ if (p->se.wait_start)
+ p->se.wait_start -= clock_offset;
+ if (p->se.sleep_start)
+ p->se.sleep_start -= clock_offset;
+ if (p->se.block_start)
+ p->se.block_start -= clock_offset;
++ if (old_cpu != new_cpu) {
++ schedstat_inc(p, se.nr_migrations);
++ if (task_hot(p, old_rq->clock, NULL))
++ schedstat_inc(p, se.nr_forced2_migrations);
++ }
+ #endif
++ p->se.vruntime -= old_cfsrq->min_vruntime -
++ new_cfsrq->min_vruntime;
+
+ __set_task_cpu(p, new_cpu);
+ }
+
+ struct migration_req {
+@@ -1075,73 +1152,75 @@ void wait_task_inactive(struct task_stru
+ {
+ unsigned long flags;
+ int running, on_rq;
+ struct rq *rq;
+
+-repeat:
+- /*
+- * We do the initial early heuristics without holding
+- * any task-queue locks at all. We'll only try to get
+- * the runqueue lock when things look like they will
+- * work out!
+- */
+- rq = task_rq(p);
++ for (;;) {
++ /*
++ * We do the initial early heuristics without holding
++ * any task-queue locks at all. We'll only try to get
++ * the runqueue lock when things look like they will
++ * work out!
++ */
++ rq = task_rq(p);
+
+- /*
+- * If the task is actively running on another CPU
+- * still, just relax and busy-wait without holding
+- * any locks.
+- *
+- * NOTE! Since we don't hold any locks, it's not
+- * even sure that "rq" stays as the right runqueue!
+- * But we don't care, since "task_running()" will
+- * return false if the runqueue has changed and p
+- * is actually now running somewhere else!
+- */
+- while (task_running(rq, p))
+- cpu_relax();
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since "task_running()" will
++ * return false if the runqueue has changed and p
++ * is actually now running somewhere else!
++ */
++ while (task_running(rq, p))
++ cpu_relax();
+
+- /*
+- * Ok, time to look more closely! We need the rq
+- * lock now, to be *sure*. If we're wrong, we'll
+- * just go back and repeat.
+- */
+- rq = task_rq_lock(p, &flags);
+- running = task_running(rq, p);
+- on_rq = p->se.on_rq;
+- task_rq_unlock(rq, &flags);
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ running = task_running(rq, p);
++ on_rq = p->se.on_rq;
++ task_rq_unlock(rq, &flags);
+
+- /*
+- * Was it really running after all now that we
+- * checked with the proper locks actually held?
+- *
+- * Oops. Go back and try again..
+- */
+- if (unlikely(running)) {
+- cpu_relax();
+- goto repeat;
+- }
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
+
+- /*
+- * It's not enough that it's not actively running,
+- * it must be off the runqueue _entirely_, and not
+- * preempted!
+- *
+- * So if it wa still runnable (but just not actively
+- * running right now), it's preempted, and we should
+- * yield - it could be a while.
+- */
+- if (unlikely(on_rq)) {
+- yield();
+- goto repeat;
+- }
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it wa still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(on_rq)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
+
+- /*
+- * Ahh, all good. It wasn't running, and it wasn't
+- * runnable, which means that it will never become
+- * running in the future either. We're all done!
+- */
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
+ }
+
+ /***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+@@ -1171,11 +1250,11 @@ void kick_process(struct task_struct *p)
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+-static inline unsigned long source_load(int cpu, int type)
++static unsigned long source_load(int cpu, int type)
+ {
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0)
+@@ -1186,11 +1265,11 @@ static inline unsigned long source_load(
+
+ /*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+-static inline unsigned long target_load(int cpu, int type)
++static unsigned long target_load(int cpu, int type)
+ {
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0)
+@@ -1228,11 +1307,11 @@ find_idlest_group(struct sched_domain *s
+ int local_group;
+ int i;
+
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+- goto nextgroup;
++ continue;
+
+ local_group = cpu_isset(this_cpu, group->cpumask);
+
+ /* Tally up the load of all CPUs in the group */
+ avg_load = 0;
+@@ -1256,13 +1335,11 @@ find_idlest_group(struct sched_domain *s
+ this = group;
+ } else if (avg_load < min_load) {
+ min_load = avg_load;
+ idlest = group;
+ }
+-nextgroup:
+- group = group->next;
+- } while (group != sd->groups);
++ } while (group = group->next, group != sd->groups);
+
+ if (!idlest || 100*this_load < imbalance*min_load)
+ return NULL;
+ return idlest;
+ }
+@@ -1390,12 +1467,17 @@ static int wake_idle(int cpu, struct tas
+
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_IDLE) {
+ cpus_and(tmp, sd->span, p->cpus_allowed);
+ for_each_cpu_mask(i, tmp) {
+- if (idle_cpu(i))
++ if (idle_cpu(i)) {
++ if (i != task_cpu(p)) {
++ schedstat_inc(p,
++ se.nr_wakeups_idle);
++ }
+ return i;
++ }
+ }
+ } else {
+ break;
+ }
+ }
+@@ -1422,11 +1504,11 @@ static inline int wake_idle(int cpu, str
+ *
+ * returns failure only if the task is already active.
+ */
+ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+ {
+- int cpu, this_cpu, success = 0;
++ int cpu, orig_cpu, this_cpu, success = 0;
+ unsigned long flags;
+ long old_state;
+ struct rq *rq;
+ #ifdef CONFIG_SMP
+ struct sched_domain *sd, *this_sd = NULL;
+@@ -1441,19 +1523,20 @@ static int try_to_wake_up(struct task_st
+
+ if (p->se.on_rq)
+ goto out_running;
+
+ cpu = task_cpu(p);
++ orig_cpu = cpu;
+ this_cpu = smp_processor_id();
+
+ #ifdef CONFIG_SMP
+ if (unlikely(task_running(rq, p)))
+ goto out_activate;
+
+ new_cpu = cpu;
+
+- schedstat_inc(rq, ttwu_cnt);
++ schedstat_inc(rq, ttwu_count);
+ if (cpu == this_cpu) {
+ schedstat_inc(rq, ttwu_local);
+ goto out_set_cpu;
+ }
+
+@@ -1484,10 +1567,17 @@ static int try_to_wake_up(struct task_st
+
+ if (this_sd->flags & SD_WAKE_AFFINE) {
+ unsigned long tl = this_load;
+ unsigned long tl_per_task;
+
++ /*
++ * Attract cache-cold tasks on sync wakeups:
++ */
++ if (sync && !task_hot(p, rq->clock, this_sd))
++ goto out_set_cpu;
++
++ schedstat_inc(p, se.nr_wakeups_affine_attempts);
+ tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+ /*
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+@@ -1503,10 +1593,11 @@ static int try_to_wake_up(struct task_st
+ * This domain has SD_WAKE_AFFINE and
+ * p is cache cold in this domain, and
+ * there is no bad imbalance.
+ */
+ schedstat_inc(this_sd, ttwu_move_affine);
++ schedstat_inc(p, se.nr_wakeups_affine);
+ goto out_set_cpu;
+ }
+ }
+
+ /*
+@@ -1514,10 +1605,11 @@ static int try_to_wake_up(struct task_st
+ * limit is reached.
+ */
+ if (this_sd->flags & SD_WAKE_BALANCE) {
+ if (imbalance*this_load <= 100*load) {
+ schedstat_inc(this_sd, ttwu_move_balance);
++ schedstat_inc(p, se.nr_wakeups_passive);
+ goto out_set_cpu;
+ }
+ }
+ }
+
+@@ -1539,22 +1631,22 @@ out_set_cpu:
+ cpu = task_cpu(p);
+ }
+
+ out_activate:
+ #endif /* CONFIG_SMP */
++ schedstat_inc(p, se.nr_wakeups);
++ if (sync)
++ schedstat_inc(p, se.nr_wakeups_sync);
++ if (orig_cpu != cpu)
++ schedstat_inc(p, se.nr_wakeups_migrate);
++ if (cpu == this_cpu)
++ schedstat_inc(p, se.nr_wakeups_local);
++ else
++ schedstat_inc(p, se.nr_wakeups_remote);
+ update_rq_clock(rq);
+ activate_task(rq, p, 1);
+- /*
+- * Sync wakeups (i.e. those types of wakeups where the waker
+- * has indicated that it will leave the CPU in short order)
+- * don't trigger a preemption, if the woken up task will run on
+- * this cpu. (in this case the 'I will reschedule' promise of
+- * the waker guarantees that the freshly woken up task is going
+- * to be considered on this CPU.)
+- */
+- if (!sync || cpu != this_cpu)
+- check_preempt_curr(rq, p);
++ check_preempt_curr(rq, p);
+ success = 1;
+
+ out_running:
+ p->state = TASK_RUNNING;
+ out:
+@@ -1581,32 +1673,24 @@ int fastcall wake_up_state(struct task_s
+ *
+ * __sched_fork() is basic setup used by init_idle() too:
+ */
+ static void __sched_fork(struct task_struct *p)
+ {
+- p->se.wait_start_fair = 0;
+ p->se.exec_start = 0;
+ p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
+- p->se.delta_exec = 0;
+- p->se.delta_fair_run = 0;
+- p->se.delta_fair_sleep = 0;
+- p->se.wait_runtime = 0;
+- p->se.sleep_start_fair = 0;
+
+ #ifdef CONFIG_SCHEDSTATS
+ p->se.wait_start = 0;
+- p->se.sum_wait_runtime = 0;
+ p->se.sum_sleep_runtime = 0;
+ p->se.sleep_start = 0;
+ p->se.block_start = 0;
+ p->se.sleep_max = 0;
+ p->se.block_max = 0;
+ p->se.exec_max = 0;
++ p->se.slice_max = 0;
+ p->se.wait_max = 0;
+- p->se.wait_runtime_overruns = 0;
+- p->se.wait_runtime_underruns = 0;
+ #endif
+
+ INIT_LIST_HEAD(&p->run_list);
+ p->se.on_rq = 0;
+
+@@ -1633,16 +1717,18 @@ void sched_fork(struct task_struct *p, i
+ __sched_fork(p);
+
+ #ifdef CONFIG_SMP
+ cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+ #endif
+- __set_task_cpu(p, cpu);
++ set_task_cpu(p, cpu);
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child:
+ */
+ p->prio = current->normal_prio;
++ if (!rt_prio(p->prio))
++ p->sched_class = &fair_sched_class;
+
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ if (likely(sched_info_on()))
+ memset(&p->sched_info, 0, sizeof(p->sched_info));
+ #endif
+@@ -1655,44 +1741,28 @@ void sched_fork(struct task_struct *p, i
+ #endif
+ put_cpu();
+ }
+
+ /*
+- * After fork, child runs first. (default) If set to 0 then
+- * parent will (try to) run first.
+- */
+-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
+-
+-/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+ {
+ unsigned long flags;
+ struct rq *rq;
+- int this_cpu;
+
+ rq = task_rq_lock(p, &flags);
+ BUG_ON(p->state != TASK_RUNNING);
+- this_cpu = smp_processor_id(); /* parent's CPU */
+ update_rq_clock(rq);
+
+ p->prio = effective_prio(p);
+
+- if (rt_prio(p->prio))
+- p->sched_class = &rt_sched_class;
+- else
+- p->sched_class = &fair_sched_class;
+-
+- if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+- (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+- !current->se.on_rq) {
+-
++ if (!p->sched_class->task_new || !current->se.on_rq) {
+ activate_task(rq, p, 0);
+ } else {
+ /*
+ * Let the scheduling class do new task startup
+ * management (if any):
+@@ -1793,15 +1863,15 @@ prepare_task_switch(struct rq *rq, struc
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+- * so, we finish that here outside of the runqueue lock. (Doing it
++ * so, we finish that here outside of the runqueue lock. (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
++static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+ __releases(rq->lock)
+ {
+ struct mm_struct *mm = rq->prev_mm;
+ long prev_state;
+
+@@ -1847,11 +1917,11 @@ asmlinkage void schedule_tail(struct tas
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
+ /* In this case, finish_task_switch does not reenable preemption */
+ preempt_enable();
+ #endif
+ if (current->set_child_tid)
+- put_user(current->pid, current->set_child_tid);
++ put_user(task_pid_vnr(current), current->set_child_tid);
+ }
+
+ /*
+ * context_switch - switch to the new MM and the new
+ * thread's register state.
+@@ -1979,56 +2049,30 @@ unsigned long nr_active(void)
+ * Update rq->cpu_load[] statistics. This function is usually called every
+ * scheduler tick (TICK_NSEC).
+ */
+ static void update_cpu_load(struct rq *this_rq)
+ {
+- u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
+- unsigned long total_load = this_rq->ls.load.weight;
+- unsigned long this_load = total_load;
+- struct load_stat *ls = &this_rq->ls;
++ unsigned long this_load = this_rq->load.weight;
+ int i, scale;
+
+ this_rq->nr_load_updates++;
+- if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
+- goto do_avg;
+-
+- /* Update delta_fair/delta_exec fields first */
+- update_curr_load(this_rq);
+-
+- fair_delta64 = ls->delta_fair + 1;
+- ls->delta_fair = 0;
+-
+- exec_delta64 = ls->delta_exec + 1;
+- ls->delta_exec = 0;
+-
+- sample_interval64 = this_rq->clock - ls->load_update_last;
+- ls->load_update_last = this_rq->clock;
+-
+- if ((s64)sample_interval64 < (s64)TICK_NSEC)
+- sample_interval64 = TICK_NSEC;
+-
+- if (exec_delta64 > sample_interval64)
+- exec_delta64 = sample_interval64;
+-
+- idle_delta64 = sample_interval64 - exec_delta64;
+-
+- tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
+- tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
+-
+- this_load = (unsigned long)tmp64;
+-
+-do_avg:
+
+ /* Update our load: */
+ for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+ unsigned long old_load, new_load;
+
+ /* scale is effectively 1 << i now, and >> i divides by scale */
+
+ old_load = this_rq->cpu_load[i];
+ new_load = this_load;
+-
++ /*
++ * Round up the averaging division if load is increasing. This
++ * prevents us from getting stuck on 9 if the load is 10, for
++ * example.
++ */
++ if (new_load > old_load)
++ new_load += scale-1;
+ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+ }
+ }
+
+ #ifdef CONFIG_SMP
+@@ -2101,11 +2145,11 @@ static void double_lock_balance(struct r
+ }
+
+ /*
+ * If dest_cpu is allowed for this process, migrate the task to it.
+ * This is accomplished by forcing the cpu_allowed mask to only
+- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
++ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * the cpu_allowed mask is restored.
+ */
+ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+ {
+ struct migration_req req;
+@@ -2176,44 +2220,69 @@ int can_migrate_task(struct task_struct
+ * We do not migrate tasks that are:
+ * 1) running (obviously), or
+ * 2) cannot be migrated to this CPU due to cpus_allowed, or
+ * 3) are cache-hot on their current CPU.
+ */
+- if (!cpu_isset(this_cpu, p->cpus_allowed))
++ if (!cpu_isset(this_cpu, p->cpus_allowed)) {
++ schedstat_inc(p, se.nr_failed_migrations_affine);
+ return 0;
++ }
+ *all_pinned = 0;
+
+- if (task_running(rq, p))
++ if (task_running(rq, p)) {
++ schedstat_inc(p, se.nr_failed_migrations_running);
+ return 0;
++ }
++
++ /*
++ * Aggressive migration if:
++ * 1) task is cache cold, or
++ * 2) too many balance attempts have failed.
++ */
++
++ if (!task_hot(p, rq->clock, sd) ||
++ sd->nr_balance_failed > sd->cache_nice_tries) {
++#ifdef CONFIG_SCHEDSTATS
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(sd, lb_hot_gained[idle]);
++ schedstat_inc(p, se.nr_forced_migrations);
++ }
++#endif
++ return 1;
++ }
+
++ if (task_hot(p, rq->clock, sd)) {
++ schedstat_inc(p, se.nr_failed_migrations_hot);
++ return 0;
++ }
+ return 1;
+ }
+
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator)
++static unsigned long
++balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_load_move, struct sched_domain *sd,
++ enum cpu_idle_type idle, int *all_pinned,
++ int *this_best_prio, struct rq_iterator *iterator)
+ {
+- int pulled = 0, pinned = 0, skip_for_load;
++ int loops = 0, pulled = 0, pinned = 0, skip_for_load;
+ struct task_struct *p;
+ long rem_load_move = max_load_move;
+
+- if (max_nr_move == 0 || max_load_move == 0)
++ if (max_load_move == 0)
+ goto out;
+
+ pinned = 1;
+
+ /*
+ * Start the load-balancing iterator:
+ */
+ p = iterator->start(iterator->arg);
+ next:
+- if (!p)
++ if (!p || loops++ > sysctl_sched_nr_migrate)
+ goto out;
+ /*
+- * To help distribute high priority tasks accross CPUs we don't
++ * To help distribute high priority tasks across CPUs we don't
+ * skip a task if it will be the highest priority task (i.e. smallest
+ * prio value) on its new queue regardless of its load weight
+ */
+ skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
+ SCHED_LOAD_SCALE_FUZZ;
+@@ -2226,31 +2295,30 @@ next:
+ pull_task(busiest, p, this_rq, this_cpu);
+ pulled++;
+ rem_load_move -= p->se.load.weight;
+
+ /*
+- * We only want to steal up to the prescribed number of tasks
+- * and the prescribed amount of weighted load.
++ * We only want to steal up to the prescribed amount of weighted load.
+ */
+- if (pulled < max_nr_move && rem_load_move > 0) {
++ if (rem_load_move > 0) {
+ if (p->prio < *this_best_prio)
+ *this_best_prio = p->prio;
+ p = iterator->next(iterator->arg);
+ goto next;
+ }
+ out:
+ /*
+- * Right now, this is the only place pull_task() is called,
++ * Right now, this is one of only two places pull_task() is called,
+ * so we can safely collect pull_task() stats here rather than
+ * inside pull_task().
+ */
+ schedstat_add(sd, lb_gained[idle], pulled);
+
+ if (all_pinned)
+ *all_pinned = pinned;
+- *load_moved = max_load_move - rem_load_move;
+- return pulled;
++
++ return max_load_move - rem_load_move;
+ }
+
+ /*
+ * move_tasks tries to move up to max_load_move weighted load from busiest to
+ * this_rq, as part of a balancing operation within domain "sd".
+@@ -2261,42 +2329,65 @@ out:
+ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned)
+ {
+- struct sched_class *class = sched_class_highest;
++ const struct sched_class *class = sched_class_highest;
+ unsigned long total_load_moved = 0;
+ int this_best_prio = this_rq->curr->prio;
+
+ do {
+ total_load_moved +=
+ class->load_balance(this_rq, this_cpu, busiest,
+- ULONG_MAX, max_load_move - total_load_moved,
++ max_load_move - total_load_moved,
+ sd, idle, all_pinned, &this_best_prio);
+ class = class->next;
+ } while (class && max_load_move > total_load_moved);
+
+ return total_load_moved > 0;
+ }
+
++static int
++iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ struct rq_iterator *iterator)
++{
++ struct task_struct *p = iterator->start(iterator->arg);
++ int pinned = 0;
++
++ while (p) {
++ if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
++ pull_task(busiest, p, this_rq, this_cpu);
++ /*
++ * Right now, this is only the second place pull_task()
++ * is called, so we can safely collect pull_task()
++ * stats here rather than inside pull_task().
++ */
++ schedstat_inc(sd, lb_gained[idle]);
++
++ return 1;
++ }
++ p = iterator->next(iterator->arg);
++ }
++
++ return 0;
++}
++
+ /*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ struct sched_domain *sd, enum cpu_idle_type idle)
+ {
+- struct sched_class *class;
+- int this_best_prio = MAX_PRIO;
++ const struct sched_class *class;
+
+ for (class = sched_class_highest; class; class = class->next)
+- if (class->load_balance(this_rq, this_cpu, busiest,
+- 1, ULONG_MAX, sd, idle, NULL,
+- &this_best_prio))
++ if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
+ return 1;
+
+ return 0;
+ }
+
+@@ -2313,11 +2404,11 @@ find_busiest_group(struct sched_domain *
+ struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
+ unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+ unsigned long max_pull;
+ unsigned long busiest_load_per_task, busiest_nr_running;
+ unsigned long this_load_per_task, this_nr_running;
+- int load_idx;
++ int load_idx, group_imb = 0;
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance = 1;
+ unsigned long leader_nr_running = 0, min_load_per_task = 0;
+ unsigned long min_nr_running = ULONG_MAX;
+ struct sched_group *group_min = NULL, *group_leader = NULL;
+@@ -2332,23 +2423,26 @@ find_busiest_group(struct sched_domain *
+ load_idx = sd->newidle_idx;
+ else
+ load_idx = sd->idle_idx;
+
+ do {
+- unsigned long load, group_capacity;
++ unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
+ int local_group;
+ int i;
++ int __group_imb = 0;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+ unsigned long sum_nr_running, sum_weighted_load;
+
+ local_group = cpu_isset(this_cpu, group->cpumask);
+
+ if (local_group)
+ balance_cpu = first_cpu(group->cpumask);
+
+ /* Tally up the load of all CPUs in the group */
+ sum_weighted_load = sum_nr_running = avg_load = 0;
++ max_cpu_load = 0;
++ min_cpu_load = ~0UL;
+
+ for_each_cpu_mask(i, group->cpumask) {
+ struct rq *rq;
+
+ if (!cpu_isset(i, *cpus))
+@@ -2365,12 +2459,17 @@ find_busiest_group(struct sched_domain *
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
+ load = target_load(i, load_idx);
+- } else
++ } else {
+ load = source_load(i, load_idx);
++ if (load > max_cpu_load)
++ max_cpu_load = load;
++ if (min_cpu_load > load)
++ min_cpu_load = load;
++ }
+
+ avg_load += load;
+ sum_nr_running += rq->nr_running;
+ sum_weighted_load += weighted_cpuload(i);
+ }
+@@ -2392,23 +2491,27 @@ find_busiest_group(struct sched_domain *
+
+ /* Adjust by relative CPU power of the group */
+ avg_load = sg_div_cpu_power(group,
+ avg_load * SCHED_LOAD_SCALE);
+
++ if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
++ __group_imb = 1;
++
+ group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+
+ if (local_group) {
+ this_load = avg_load;
+ this = group;
+ this_nr_running = sum_nr_running;
+ this_load_per_task = sum_weighted_load;
+ } else if (avg_load > max_load &&
+- sum_nr_running > group_capacity) {
++ (sum_nr_running > group_capacity || __group_imb)) {
+ max_load = avg_load;
+ busiest = group;
+ busiest_nr_running = sum_nr_running;
+ busiest_load_per_task = sum_weighted_load;
++ group_imb = __group_imb;
+ }
+
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /*
+ * Busy processors will not participate in power savings
+@@ -2476,19 +2579,22 @@ group_next:
+ if (this_load >= avg_load ||
+ 100*max_load <= sd->imbalance_pct*this_load)
+ goto out_balanced;
+
+ busiest_load_per_task /= busiest_nr_running;
++ if (group_imb)
++ busiest_load_per_task = min(busiest_load_per_task, avg_load);
++
+ /*
+ * We're trying to get all the cpus to the average_load, so we don't
+ * want to push ourselves above the average load, nor do we wish to
+ * reduce the max loaded cpu below the average load, as either of these
+ * actions would just result in more rebalancing later, and ping-pong
+ * tasks around. Thus we look for the minimum possible imbalance.
+ * Negative imbalances (*we* are more loaded than anyone else) will
+ * be counted as no imbalance for these purposes -- we can't fix that
+- * by pulling tasks to us. Be careful of negative numbers as they'll
++ * by pulling tasks to us. Be careful of negative numbers as they'll
+ * appear as very large values with unsigned longs.
+ */
+ if (max_load <= busiest_load_per_task)
+ goto out_balanced;
+
+@@ -2650,11 +2756,11 @@ static int load_balance(int this_cpu, st
+ */
+ if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+ sd_idle = 1;
+
+- schedstat_inc(sd, lb_cnt[idle]);
++ schedstat_inc(sd, lb_count[idle]);
+
+ redo:
+ group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+ &cpus, balance);
+
+@@ -2803,11 +2909,11 @@ load_balance_newidle(int this_cpu, struc
+ */
+ if (sd->flags & SD_SHARE_CPUPOWER &&
+ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+ sd_idle = 1;
+
+- schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
++ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
+ redo:
+ group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
+ &sd_idle, &cpus, NULL);
+ if (!group) {
+ schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
+@@ -2919,11 +3025,11 @@ static void active_load_balance(struct r
+
+ target_rq = cpu_rq(target_cpu);
+
+ /*
+ * This condition is "impossible", if it occurs
+- * we need to fix it. Originally reported by
++ * we need to fix it. Originally reported by
+ * Bjorn Helgaas on a 128-cpu setup.
+ */
+ BUG_ON(busiest_rq == target_rq);
+
+ /* move a task from busiest_rq to target_rq */
+@@ -2937,11 +3043,11 @@ static void active_load_balance(struct r
+ cpu_isset(busiest_cpu, sd->span))
+ break;
+ }
+
+ if (likely(sd)) {
+- schedstat_inc(sd, alb_cnt);
++ schedstat_inc(sd, alb_count);
+
+ if (move_one_task(target_rq, target_cpu, busiest_rq,
+ sd, CPU_IDLE))
+ schedstat_inc(sd, alb_pushed);
+ else
+@@ -2951,11 +3057,11 @@ static void active_load_balance(struct r
+ }
+
+ #ifdef CONFIG_NO_HZ
+ static struct {
+ atomic_t load_balancer;
+- cpumask_t cpu_mask;
++ cpumask_t cpu_mask;
+ } nohz ____cacheline_aligned = {
+ .load_balancer = ATOMIC_INIT(-1),
+ .cpu_mask = CPU_MASK_NONE,
+ };
+
+@@ -3030,11 +3136,11 @@ static DEFINE_SPINLOCK(balancing);
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
++static void rebalance_domains(int cpu, enum cpu_idle_type idle)
+ {
+ int balance = 1;
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long interval;
+ struct sched_domain *sd;
+@@ -3214,22 +3320,10 @@ static inline void trigger_load_balance(
+ */
+ static inline void idle_balance(int cpu, struct rq *rq)
+ {
+ }
+
+-/* Avoid "used but not defined" warning on UP */
+-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, unsigned long *load_moved,
+- int *this_best_prio, struct rq_iterator *iterator)
+-{
+- *load_moved = 0;
+-
+- return 0;
+-}
+-
+ #endif
+
+ DEFINE_PER_CPU(struct kernel_stat, kstat);
+
+ EXPORT_PER_CPU_SYMBOL(kstat);
+@@ -3244,11 +3338,11 @@ unsigned long long task_sched_runtime(st
+ u64 ns, delta_exec;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &flags);
+ ns = p->se.sum_exec_runtime;
+- if (rq->curr == p) {
++ if (task_current(rq, p)) {
+ update_rq_clock(rq);
+ delta_exec = rq->clock - p->se.exec_start;
+ if ((s64)delta_exec > 0)
+ ns += delta_exec;
+ }
+@@ -3258,11 +3352,10 @@ unsigned long long task_sched_runtime(st
+ }
+
+ /*
+ * Account user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+- * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in user space since the last update
+ */
+ void account_user_time(struct task_struct *p, cputime_t cputime)
+ {
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+@@ -3277,10 +3370,39 @@ void account_user_time(struct task_struc
+ else
+ cpustat->user = cputime64_add(cpustat->user, tmp);
+ }
+
+ /*
++ * Account guest cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in virtual machine since the last update
++ */
++static void account_guest_time(struct task_struct *p, cputime_t cputime)
++{
++ cputime64_t tmp;
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++
++ tmp = cputime_to_cputime64(cputime);
++
++ p->utime = cputime_add(p->utime, cputime);
++ p->gtime = cputime_add(p->gtime, cputime);
++
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++ cpustat->guest = cputime64_add(cpustat->guest, tmp);
++}
++
++/*
++ * Account scaled user cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in user space since the last update
++ */
++void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->utimescaled = cputime_add(p->utimescaled, cputime);
++}
++
++/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+@@ -3289,10 +3411,13 @@ void account_system_time(struct task_str
+ {
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct rq *rq = this_rq();
+ cputime64_t tmp;
+
++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
++ return account_guest_time(p, cputime);
++
+ p->stime = cputime_add(p->stime, cputime);
+
+ /* Add system time to cpustat. */
+ tmp = cputime_to_cputime64(cputime);
+ if (hardirq_count() - hardirq_offset)
+@@ -3308,10 +3433,21 @@ void account_system_time(struct task_str
+ /* Account for system time used */
+ acct_update_integrals(p);
+ }
+
+ /*
++ * Account scaled system cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ */
++void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
++{
++ p->stimescaled = cputime_add(p->stimescaled, cputime);
++}
++
++/*
+ * Account for involuntary wait time.
+ * @p: the process from which the cpu time has been stolen
+ * @steal: the cpu time spent in involuntary wait
+ */
+ void account_steal_time(struct task_struct *p, cputime_t steal)
+@@ -3404,43 +3540,56 @@ EXPORT_SYMBOL(sub_preempt_count);
+ /*
+ * Print scheduling while atomic bug:
+ */
+ static noinline void __schedule_bug(struct task_struct *prev)
+ {
+- printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
+- prev->comm, preempt_count(), prev->pid);
++ struct pt_regs *regs = get_irq_regs();
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
+ debug_show_held_locks(prev);
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
+- dump_stack();
++
++ if (regs)
++ show_regs(regs);
++ else
++ dump_stack();
+ }
+
+ /*
+ * Various schedule()-time debugging checks and statistics:
+ */
+ static inline void schedule_debug(struct task_struct *prev)
+ {
+ /*
+- * Test if we are atomic. Since do_exit() needs to call into
++ * Test if we are atomic. Since do_exit() needs to call into
+ * schedule() atomically, we ignore that path for now.
+ * Otherwise, whine if we are scheduling when we should not be.
+ */
+ if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+ __schedule_bug(prev);
+
+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+- schedstat_inc(this_rq(), sched_cnt);
++ schedstat_inc(this_rq(), sched_count);
++#ifdef CONFIG_SCHEDSTATS
++ if (unlikely(prev->lock_depth >= 0)) {
++ schedstat_inc(this_rq(), bkl_count);
++ schedstat_inc(prev, sched_info.bkl_count);
++ }
++#endif
+ }
+
+ /*
+ * Pick up the highest-prio task:
+ */
+ static inline struct task_struct *
+ pick_next_task(struct rq *rq, struct task_struct *prev)
+ {
+- struct sched_class *class;
++ const struct sched_class *class;
+ struct task_struct *p;
+
+ /*
+ * Optimization: we know that if all tasks are in
+ * the fair class we can call that function directly:
+@@ -3485,13 +3634,17 @@ need_resched:
+ release_kernel_lock(prev);
+ need_resched_nonpreemptible:
+
+ schedule_debug(prev);
+
+- spin_lock_irq(&rq->lock);
+- clear_tsk_need_resched(prev);
++ /*
++ * Do the rq-clock update outside the rq lock:
++ */
++ local_irq_disable();
+ __update_rq_clock(rq);
++ spin_lock(&rq->lock);
++ clear_tsk_need_resched(prev);
+
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev)))) {
+ prev->state = TASK_RUNNING;
+@@ -3530,11 +3683,11 @@ need_resched_nonpreemptible:
+ EXPORT_SYMBOL(schedule);
+
+ #ifdef CONFIG_PREEMPT
+ /*
+ * this is the entry point to schedule() from in-kernel preemption
+- * off of preempt_enable. Kernel preemptions off return from interrupt
++ * off of preempt_enable. Kernel preemptions off return from interrupt
+ * occur there and call schedule directly.
+ */
+ asmlinkage void __sched preempt_schedule(void)
+ {
+ struct thread_info *ti = current_thread_info();
+@@ -3542,36 +3695,39 @@ asmlinkage void __sched preempt_schedule
+ struct task_struct *task = current;
+ int saved_lock_depth;
+ #endif
+ /*
+ * If there is a non-zero preempt_count or interrupts are disabled,
+- * we do not want to preempt the current task. Just return..
++ * we do not want to preempt the current task. Just return..
+ */
+ if (likely(ti->preempt_count || irqs_disabled()))
+ return;
+
+-need_resched:
+- add_preempt_count(PREEMPT_ACTIVE);
+- /*
+- * We keep the big kernel semaphore locked, but we
+- * clear ->lock_depth so that schedule() doesnt
+- * auto-release the semaphore:
+- */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesnt
++ * auto-release the semaphore:
++ */
+ #ifdef CONFIG_PREEMPT_BKL
+- saved_lock_depth = task->lock_depth;
+- task->lock_depth = -1;
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
+ #endif
+- schedule();
++ schedule();
+ #ifdef CONFIG_PREEMPT_BKL
+- task->lock_depth = saved_lock_depth;
++ task->lock_depth = saved_lock_depth;
+ #endif
+- sub_preempt_count(PREEMPT_ACTIVE);
++ sub_preempt_count(PREEMPT_ACTIVE);
+
+- /* we could miss a preemption opportunity between schedule and now */
+- barrier();
+- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+- goto need_resched;
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ }
+ EXPORT_SYMBOL(preempt_schedule);
+
+ /*
+ * this is the entry point to schedule() from kernel preemption
+@@ -3587,33 +3743,36 @@ asmlinkage void __sched preempt_schedule
+ int saved_lock_depth;
+ #endif
+ /* Catch callers which need to be fixed */
+ BUG_ON(ti->preempt_count || !irqs_disabled());
+
+-need_resched:
+- add_preempt_count(PREEMPT_ACTIVE);
+- /*
+- * We keep the big kernel semaphore locked, but we
+- * clear ->lock_depth so that schedule() doesnt
+- * auto-release the semaphore:
+- */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesnt
++ * auto-release the semaphore:
++ */
+ #ifdef CONFIG_PREEMPT_BKL
+- saved_lock_depth = task->lock_depth;
+- task->lock_depth = -1;
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
+ #endif
+- local_irq_enable();
+- schedule();
+- local_irq_disable();
++ local_irq_enable();
++ schedule();
++ local_irq_disable();
+ #ifdef CONFIG_PREEMPT_BKL
+- task->lock_depth = saved_lock_depth;
++ task->lock_depth = saved_lock_depth;
+ #endif
+- sub_preempt_count(PREEMPT_ACTIVE);
++ sub_preempt_count(PREEMPT_ACTIVE);
+
+- /* we could miss a preemption opportunity between schedule and now */
+- barrier();
+- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+- goto need_resched;
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ }
+
+ #endif /* CONFIG_PREEMPT */
+
+ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+@@ -3622,25 +3781,24 @@ int default_wake_function(wait_queue_t *
+ return try_to_wake_up(curr->private, mode, sync);
+ }
+ EXPORT_SYMBOL(default_wake_function);
+
+ /*
+- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
++ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
+ *
+ * There are circumstances in which we can try to wake a task which has already
+- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
+ */
+ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, int sync, void *key)
+ {
+- struct list_head *tmp, *next;
++ wait_queue_t *curr, *next;
+
+- list_for_each_safe(tmp, next, &q->task_list) {
+- wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
++ list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+ unsigned flags = curr->flags;
+
+ if (curr->func(curr, mode, sync, key) &&
+ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+ break;
+@@ -3702,11 +3860,11 @@ __wake_up_sync(wait_queue_head_t *q, uns
+ __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+ spin_unlock_irqrestore(&q->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+
+-void fastcall complete(struct completion *x)
++void complete(struct completion *x)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+@@ -3714,11 +3872,11 @@ void fastcall complete(struct completion
+ 1, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete);
+
+-void fastcall complete_all(struct completion *x)
++void complete_all(struct completion *x)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done += UINT_MAX/2;
+@@ -3726,210 +3884,123 @@ void fastcall complete_all(struct comple
+ 0, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete_all);
+
+-void fastcall __sched wait_for_completion(struct completion *x)
+-{
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
+-
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- __set_current_state(TASK_UNINTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- schedule();
+- spin_lock_irq(&x->wait.lock);
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+- spin_unlock_irq(&x->wait.lock);
+-}
+-EXPORT_SYMBOL(wait_for_completion);
+-
+-unsigned long fastcall __sched
+-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++static inline long __sched
++do_wait_for_common(struct completion *x, long timeout, int state)
+ {
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+ if (!x->done) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ wait.flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue_tail(&x->wait, &wait);
+ do {
+- __set_current_state(TASK_UNINTERRUPTIBLE);
++ if (state == TASK_INTERRUPTIBLE &&
++ signal_pending(current)) {
++ __remove_wait_queue(&x->wait, &wait);
++ return -ERESTARTSYS;
++ }
++ __set_current_state(state);
+ spin_unlock_irq(&x->wait.lock);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irq(&x->wait.lock);
+ if (!timeout) {
+ __remove_wait_queue(&x->wait, &wait);
+- goto out;
++ return timeout;
+ }
+ } while (!x->done);
+ __remove_wait_queue(&x->wait, &wait);
+ }
+ x->done--;
+-out:
+- spin_unlock_irq(&x->wait.lock);
+ return timeout;
+ }
+-EXPORT_SYMBOL(wait_for_completion_timeout);
+
+-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
++static long __sched
++wait_for_common(struct completion *x, long timeout, int state)
+ {
+- int ret = 0;
+-
+ might_sleep();
+
+ spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
+-
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- if (signal_pending(current)) {
+- ret = -ERESTARTSYS;
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- __set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- schedule();
+- spin_lock_irq(&x->wait.lock);
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+-out:
++ timeout = do_wait_for_common(x, timeout, state);
+ spin_unlock_irq(&x->wait.lock);
+-
+- return ret;
++ return timeout;
+ }
+-EXPORT_SYMBOL(wait_for_completion_interruptible);
+
+-unsigned long fastcall __sched
+-wait_for_completion_interruptible_timeout(struct completion *x,
+- unsigned long timeout)
++void __sched wait_for_completion(struct completion *x)
+ {
+- might_sleep();
+-
+- spin_lock_irq(&x->wait.lock);
+- if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
++ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(wait_for_completion);
+
+- wait.flags |= WQ_FLAG_EXCLUSIVE;
+- __add_wait_queue_tail(&x->wait, &wait);
+- do {
+- if (signal_pending(current)) {
+- timeout = -ERESTARTSYS;
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- __set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irq(&x->wait.lock);
+- timeout = schedule_timeout(timeout);
+- spin_lock_irq(&x->wait.lock);
+- if (!timeout) {
+- __remove_wait_queue(&x->wait, &wait);
+- goto out;
+- }
+- } while (!x->done);
+- __remove_wait_queue(&x->wait, &wait);
+- }
+- x->done--;
+-out:
+- spin_unlock_irq(&x->wait.lock);
+- return timeout;
++unsigned long __sched
++wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++{
++ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
+ }
+-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
++EXPORT_SYMBOL(wait_for_completion_timeout);
+
+-static inline void
+-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
++int __sched wait_for_completion_interruptible(struct completion *x)
+ {
+- spin_lock_irqsave(&q->lock, *flags);
+- __add_wait_queue(q, wait);
+- spin_unlock(&q->lock);
++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
++ if (t == -ERESTARTSYS)
++ return t;
++ return 0;
+ }
++EXPORT_SYMBOL(wait_for_completion_interruptible);
+
+-static inline void
+-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
++unsigned long __sched
++wait_for_completion_interruptible_timeout(struct completion *x,
++ unsigned long timeout)
+ {
+- spin_lock_irq(&q->lock);
+- __remove_wait_queue(q, wait);
+- spin_unlock_irqrestore(&q->lock, *flags);
++ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
+ }
++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+
+-void __sched interruptible_sleep_on(wait_queue_head_t *q)
++static long __sched
++sleep_on_common(wait_queue_head_t *q, int state, long timeout)
+ {
+ unsigned long flags;
+ wait_queue_t wait;
+
+ init_waitqueue_entry(&wait, current);
+
+- current->state = TASK_INTERRUPTIBLE;
++ __set_current_state(state);
+
+- sleep_on_head(q, &wait, &flags);
+- schedule();
+- sleep_on_tail(q, &wait, &flags);
++ spin_lock_irqsave(&q->lock, flags);
++ __add_wait_queue(q, &wait);
++ spin_unlock(&q->lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&q->lock);
++ __remove_wait_queue(q, &wait);
++ spin_unlock_irqrestore(&q->lock, flags);
++
++ return timeout;
++}
++
++void __sched interruptible_sleep_on(wait_queue_head_t *q)
++{
++ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+ EXPORT_SYMBOL(interruptible_sleep_on);
+
+ long __sched
+ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_INTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- timeout = schedule_timeout(timeout);
+- sleep_on_tail(q, &wait, &flags);
+-
+- return timeout;
++ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
+ }
+ EXPORT_SYMBOL(interruptible_sleep_on_timeout);
+
+ void __sched sleep_on(wait_queue_head_t *q)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_UNINTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- schedule();
+- sleep_on_tail(q, &wait, &flags);
++ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+ EXPORT_SYMBOL(sleep_on);
+
+ long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+ {
+- unsigned long flags;
+- wait_queue_t wait;
+-
+- init_waitqueue_entry(&wait, current);
+-
+- current->state = TASK_UNINTERRUPTIBLE;
+-
+- sleep_on_head(q, &wait, &flags);
+- timeout = schedule_timeout(timeout);
+- sleep_on_tail(q, &wait, &flags);
+-
+- return timeout;
++ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
+ }
+ EXPORT_SYMBOL(sleep_on_timeout);
+
+ #ifdef CONFIG_RT_MUTEXES
+
+@@ -3944,38 +4015,44 @@ EXPORT_SYMBOL(sleep_on_timeout);
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+ void rt_mutex_setprio(struct task_struct *p, int prio)
+ {
+ unsigned long flags;
+- int oldprio, on_rq;
++ int oldprio, on_rq, running;
+ struct rq *rq;
+
+ BUG_ON(prio < 0 || prio > MAX_PRIO);
+
+ rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+
+ oldprio = p->prio;
+ on_rq = p->se.on_rq;
+- if (on_rq)
++ running = task_current(rq, p);
++ if (on_rq) {
+ dequeue_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++ }
+
+ if (rt_prio(prio))
+ p->sched_class = &rt_sched_class;
+ else
+ p->sched_class = &fair_sched_class;
+
+ p->prio = prio;
+
+ if (on_rq) {
++ if (running)
++ p->sched_class->set_curr_task(rq);
+ enqueue_task(rq, p, 0);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+- if (task_running(rq, p)) {
++ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else {
+ check_preempt_curr(rq, p);
+ }
+@@ -4135,13 +4212,13 @@ struct task_struct *idle_task(int cpu)
+
+ /**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ */
+-static inline struct task_struct *find_process_by_pid(pid_t pid)
++static struct task_struct *find_process_by_pid(pid_t pid)
+ {
+- return pid ? find_task_by_pid(pid) : current;
++ return pid ? find_task_by_vpid(pid) : current;
+ }
+
+ /* Actually do priority change: must hold rq lock. */
+ static void
+ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+@@ -4177,11 +4254,11 @@ __setscheduler(struct rq *rq, struct tas
+ * NOTE that the task may be already dead.
+ */
+ int sched_setscheduler(struct task_struct *p, int policy,
+ struct sched_param *param)
+ {
+- int retval, oldprio, oldpolicy = -1, on_rq;
++ int retval, oldprio, oldpolicy = -1, on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+
+ /* may grab non-irq protected spin_locks */
+ BUG_ON(in_interrupt());
+@@ -4259,22 +4336,30 @@ recheck:
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+ goto recheck;
+ }
+ update_rq_clock(rq);
+ on_rq = p->se.on_rq;
+- if (on_rq)
++ running = task_current(rq, p);
++ if (on_rq) {
+ deactivate_task(rq, p, 0);
++ if (running)
++ p->sched_class->put_prev_task(rq, p);
++ }
++
+ oldprio = p->prio;
+ __setscheduler(rq, p, policy, param->sched_priority);
++
+ if (on_rq) {
++ if (running)
++ p->sched_class->set_curr_task(rq);
+ activate_task(rq, p, 0);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+- if (task_running(rq, p)) {
++ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else {
+ check_preempt_curr(rq, p);
+ }
+@@ -4314,12 +4399,12 @@ do_sched_setscheduler(pid_t pid, int pol
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ */
+-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+- struct sched_param __user *param)
++asmlinkage long
++sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+ {
+ /* negative values for policy are not valid */
+ if (policy < 0)
+ return -EINVAL;
+
+@@ -4341,26 +4426,24 @@ asmlinkage long sys_sched_setparam(pid_t
+ * @pid: the pid in question.
+ */
+ asmlinkage long sys_sched_getscheduler(pid_t pid)
+ {
+ struct task_struct *p;
+- int retval = -EINVAL;
++ int retval;
+
+ if (pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ retval = -ESRCH;
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ if (p) {
+ retval = security_task_getscheduler(p);
+ if (!retval)
+ retval = p->policy;
+ }
+ read_unlock(&tasklist_lock);
+-
+-out_nounlock:
+ return retval;
+ }
+
+ /**
+ * sys_sched_getscheduler - get the RT priority of a thread
+@@ -4369,14 +4452,14 @@ out_nounlock:
+ */
+ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+ {
+ struct sched_param lp;
+ struct task_struct *p;
+- int retval = -EINVAL;
++ int retval;
+
+ if (!param || pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+@@ -4392,11 +4475,10 @@ asmlinkage long sys_sched_getparam(pid_t
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+-out_nounlock:
+ return retval;
+
+ out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
+@@ -4418,11 +4500,11 @@ long sched_setaffinity(pid_t pid, cpumas
+ return -ESRCH;
+ }
+
+ /*
+ * It is not safe to call set_cpus_allowed with the
+- * tasklist_lock held. We will bump the task_struct's
++ * tasklist_lock held. We will bump the task_struct's
+ * usage count and then drop tasklist_lock.
+ */
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
+@@ -4435,12 +4517,25 @@ long sched_setaffinity(pid_t pid, cpumas
+ if (retval)
+ goto out_unlock;
+
+ cpus_allowed = cpuset_cpus_allowed(p);
+ cpus_and(new_mask, new_mask, cpus_allowed);
++ again:
+ retval = set_cpus_allowed(p, new_mask);
+
++ if (!retval) {
++ cpus_allowed = cpuset_cpus_allowed(p);
++ if (!cpus_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ new_mask = cpus_allowed;
++ goto again;
++ }
++ }
+ out_unlock:
+ put_task_struct(p);
+ mutex_unlock(&sched_hotcpu_mutex);
+ return retval;
+ }
+@@ -4552,12 +4647,12 @@ asmlinkage long sys_sched_getaffinity(pi
+ */
+ asmlinkage long sys_sched_yield(void)
+ {
+ struct rq *rq = this_rq_lock();
+
+- schedstat_inc(rq, yld_cnt);
+- current->sched_class->yield_task(rq, current);
++ schedstat_inc(rq, yld_count);
++ current->sched_class->yield_task(rq);
+
+ /*
+ * Since we are going to call schedule() anyway, there's
+ * no need to preempt or enable interrupts:
+ */
+@@ -4601,11 +4696,11 @@ EXPORT_SYMBOL(cond_resched);
+
+ /*
+ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+ int cond_resched_lock(spinlock_t *lock)
+ {
+@@ -4655,11 +4750,11 @@ void __sched yield(void)
+ sys_sched_yield();
+ }
+ EXPORT_SYMBOL(yield);
+
+ /*
+- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+@@ -4747,15 +4842,16 @@ asmlinkage long sys_sched_get_priority_m
+ */
+ asmlinkage
+ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+ {
+ struct task_struct *p;
+- int retval = -EINVAL;
++ unsigned int time_slice;
++ int retval;
+ struct timespec t;
+
+ if (pid < 0)
+- goto out_nounlock;
++ return -EINVAL;
+
+ retval = -ESRCH;
+ read_lock(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ if (!p)
+@@ -4763,16 +4859,32 @@ long sys_sched_rr_get_interval(pid_t pid
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+- jiffies_to_timespec(p->policy == SCHED_FIFO ?
+- 0 : static_prio_timeslice(p->static_prio), &t);
++ /*
++ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
++ * tasks that are on an otherwise idle runqueue:
++ */
++ time_slice = 0;
++ if (p->policy == SCHED_RR) {
++ time_slice = DEF_TIMESLICE;
++ } else {
++ struct sched_entity *se = &p->se;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ if (rq->cfs.load.weight)
++ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
++ task_rq_unlock(rq, &flags);
++ }
+ read_unlock(&tasklist_lock);
++ jiffies_to_timespec(time_slice, &t);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+-out_nounlock:
+ return retval;
++
+ out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
+ }
+
+@@ -4782,32 +4894,33 @@ static void show_task(struct task_struct
+ {
+ unsigned long free = 0;
+ unsigned state;
+
+ state = p->state ? __ffs(p->state) + 1 : 0;
+- printk("%-13.13s %c", p->comm,
++ printk(KERN_INFO "%-13.13s %c", p->comm,
+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
+ #if BITS_PER_LONG == 32
+ if (state == TASK_RUNNING)
+- printk(" running ");
++ printk(KERN_CONT " running ");
+ else
+- printk(" %08lx ", thread_saved_pc(p));
++ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+ #else
+ if (state == TASK_RUNNING)
+- printk(" running task ");
++ printk(KERN_CONT " running task ");
+ else
+- printk(" %016lx ", thread_saved_pc(p));
++ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+ #endif
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+ {
+ unsigned long *n = end_of_stack(p);
+ while (!*n)
+ n++;
+ free = (unsigned long)n - (unsigned long)end_of_stack(p);
+ }
+ #endif
+- printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
++ printk(KERN_CONT "%5lu %5d %6d\n", free,
++ task_pid_nr(p), task_pid_nr(p->parent));
+
+ if (state != TASK_RUNNING)
+ show_stack(p, NULL);
+ }
+
+@@ -4909,22 +5022,22 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+ static inline void sched_init_granularity(void)
+ {
+ unsigned int factor = 1 + ilog2(num_online_cpus());
+- const unsigned long limit = 100000000;
++ const unsigned long limit = 200000000;
+
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
+
+ sysctl_sched_latency *= factor;
+ if (sysctl_sched_latency > limit)
+ sysctl_sched_latency = limit;
+
+- sysctl_sched_runtime_limit = sysctl_sched_latency;
+- sysctl_sched_wakeup_granularity = sysctl_sched_min_granularity / 2;
++ sysctl_sched_wakeup_granularity *= factor;
++ sysctl_sched_batch_wakeup_granularity *= factor;
+ }
+
+ #ifdef CONFIG_SMP
+ /*
+ * This is how migration works:
+@@ -4946,11 +5059,11 @@ static inline void sched_init_granularit
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+- * task must not exit() & deallocate itself prematurely. The
++ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+ {
+ struct migration_req req;
+@@ -4983,11 +5096,11 @@ out:
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed);
+
+ /*
+- * Move (not current) task off this cpu, onto dest cpu. We're doing
++ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
+ *
+ * So we race with normal scheduler movements, but that's OK, as long
+@@ -5045,10 +5158,12 @@ static int migration_thread(void *data)
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ struct migration_req *req;
+ struct list_head *head;
+
++ try_to_freeze();
++
+ spin_lock_irq(&rq->lock);
+
+ if (cpu_is_offline(cpu)) {
+ spin_unlock_irq(&rq->lock);
+ goto wait_to_die;
+@@ -5089,50 +5204,69 @@ wait_to_die:
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
++
++static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ int ret;
++
++ local_irq_disable();
++ ret = __migrate_task(p, src_cpu, dest_cpu);
++ local_irq_enable();
++ return ret;
++}
++
+ /*
+- * Figure out where task on dead CPU should go, use force if neccessary.
++ * Figure out where task on dead CPU should go, use force if necessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
+ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+ {
+ unsigned long flags;
+ cpumask_t mask;
+ struct rq *rq;
+ int dest_cpu;
+
+-restart:
+- /* On same node? */
+- mask = node_to_cpumask(cpu_to_node(dead_cpu));
+- cpus_and(mask, mask, p->cpus_allowed);
+- dest_cpu = any_online_cpu(mask);
+-
+- /* On any allowed CPU? */
+- if (dest_cpu == NR_CPUS)
+- dest_cpu = any_online_cpu(p->cpus_allowed);
++ do {
++ /* On same node? */
++ mask = node_to_cpumask(cpu_to_node(dead_cpu));
++ cpus_and(mask, mask, p->cpus_allowed);
++ dest_cpu = any_online_cpu(mask);
++
++ /* On any allowed CPU? */
++ if (dest_cpu == NR_CPUS)
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++
++ /* No more Mr. Nice Guy. */
++ if (dest_cpu == NR_CPUS) {
++ cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
++ /*
++ * Try to stay on the same cpuset, where the
++ * current cpuset may be a subset of all cpus.
++ * The cpuset_cpus_allowed_locked() variant of
++ * cpuset_cpus_allowed() will not block. It must be
++ * called within calls to cpuset_lock/cpuset_unlock.
++ */
++ rq = task_rq_lock(p, &flags);
++ p->cpus_allowed = cpus_allowed;
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++ task_rq_unlock(rq, &flags);
+
+- /* No more Mr. Nice Guy. */
+- if (dest_cpu == NR_CPUS) {
+- rq = task_rq_lock(p, &flags);
+- cpus_setall(p->cpus_allowed);
+- dest_cpu = any_online_cpu(p->cpus_allowed);
+- task_rq_unlock(rq, &flags);
+-
+- /*
+- * Don't tell them about moving exiting tasks or
+- * kernel threads (both mm NULL), since they never
+- * leave kernel.
+- */
+- if (p->mm && printk_ratelimit())
+- printk(KERN_INFO "process %d (%s) no "
+- "longer affine to cpu%d\n",
+- p->pid, p->comm, dead_cpu);
+- }
+- if (!__migrate_task(p, dead_cpu, dest_cpu))
+- goto restart;
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++ * leave kernel.
++ */
++ if (p->mm && printk_ratelimit()) {
++ printk(KERN_INFO "process %d (%s) no "
++ "longer affine to cpu%d\n",
++ task_pid_nr(p), p->comm, dead_cpu);
++ }
++ }
++ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
+ }
+
+ /*
+ * While a dead CPU has no uninterruptible tasks queued at this point,
+ * it might still have a nonzero ->nr_uninterruptible counter, because
+@@ -5156,27 +5290,27 @@ static void migrate_nr_uninterruptible(s
+ /* Run through task list and migrate tasks from the dead cpu. */
+ static void migrate_live_tasks(int src_cpu)
+ {
+ struct task_struct *p, *t;
+
+- write_lock_irq(&tasklist_lock);
++ read_lock(&tasklist_lock);
+
+ do_each_thread(t, p) {
+ if (p == current)
+ continue;
+
+ if (task_cpu(p) == src_cpu)
+ move_task_off_dead_cpu(src_cpu, p);
+ } while_each_thread(t, p);
+
+- write_unlock_irq(&tasklist_lock);
++ read_unlock(&tasklist_lock);
+ }
+
+ /*
+ * Schedules idle task to be the next runnable task on current CPU.
+- * It does so by boosting its priority to highest possible and adding it to
+- * the _front_ of the runqueue. Used by CPU offline code.
++ * It does so by boosting its priority to highest possible.
++ * Used by CPU offline code.
+ */
+ void sched_idle_next(void)
+ {
+ int this_cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(this_cpu);
+@@ -5192,12 +5326,12 @@ void sched_idle_next(void)
+ */
+ spin_lock_irqsave(&rq->lock, flags);
+
+ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
+
+- /* Add idle task to the _front_ of its priority queue: */
+- activate_idle_task(p, rq);
++ update_rq_clock(rq);
++ activate_task(rq, p, 0);
+
+ spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ /*
+@@ -5219,26 +5353,25 @@ void idle_task_exit(void)
+ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
+ {
+ struct rq *rq = cpu_rq(dead_cpu);
+
+ /* Must be exiting, otherwise would be on tasklist. */
+- BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
++ BUG_ON(!p->exit_state);
+
+ /* Cannot have done final schedule yet: would have vanished. */
+ BUG_ON(p->state == TASK_DEAD);
+
+ get_task_struct(p);
+
+ /*
+ * Drop lock around migration; if someone else moves it,
+- * that's OK. No task can be added to this CPU, so iteration is
++ * that's OK. No task can be added to this CPU, so iteration is
+ * fine.
+- * NOTE: interrupts should be left disabled --dev@
+ */
+- spin_unlock(&rq->lock);
++ spin_unlock_irq(&rq->lock);
+ move_task_off_dead_cpu(dead_cpu, p);
+- spin_lock(&rq->lock);
++ spin_lock_irq(&rq->lock);
+
+ put_task_struct(p);
+ }
+
+ /* release_task() removes task from tasklist, so we won't find dead tasks. */
+@@ -5265,34 +5398,52 @@ static void migrate_dead_tasks(unsigned
+ static struct ctl_table sd_ctl_dir[] = {
+ {
+ .procname = "sched_domain",
+ .mode = 0555,
+ },
+- {0,},
++ {0, },
+ };
+
+ static struct ctl_table sd_ctl_root[] = {
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .mode = 0555,
+ .child = sd_ctl_dir,
+ },
+- {0,},
++ {0, },
+ };
+
+ static struct ctl_table *sd_alloc_ctl_entry(int n)
+ {
+ struct ctl_table *entry =
+- kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
+-
+- BUG_ON(!entry);
+- memset(entry, 0, n * sizeof(struct ctl_table));
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+ return entry;
+ }
+
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+ mode_t mode, proc_handler *proc_handler)
+ {
+@@ -5306,10 +5457,13 @@ set_table_entry(struct ctl_table *entry,
+ static struct ctl_table *
+ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ {
+ struct ctl_table *table = sd_alloc_ctl_entry(12);
+
++ if (table == NULL)
++ return NULL;
++
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+@@ -5329,10 +5483,11 @@ sd_alloc_ctl_domain_table(struct sched_d
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[10], "flags", &sd->flags,
+ sizeof(int), 0644, proc_dointvec_minmax);
++ /* &table[11] is terminator */
+
+ return table;
+ }
+
+ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+@@ -5343,10 +5498,12 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ char buf[32];
+
+ for_each_domain(cpu, sd)
+ domain_num++;
+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
+
+ i = 0;
+ for_each_domain(cpu, sd) {
+ snprintf(buf, 32, "domain%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+@@ -5357,28 +5514,48 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ }
+ return table;
+ }
+
+ static struct ctl_table_header *sd_sysctl_header;
+-static void init_sched_domain_sysctl(void)
++static void register_sched_domain_sysctl(void)
+ {
+ int i, cpu_num = num_online_cpus();
+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
++ WARN_ON(sd_ctl_dir[0].child);
+ sd_ctl_dir[0].child = entry;
+
+- for (i = 0; i < cpu_num; i++, entry++) {
++ if (entry == NULL)
++ return;
++
++ for_each_online_cpu(i) {
+ snprintf(buf, 32, "cpu%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+ entry->child = sd_alloc_ctl_cpu_table(i);
++ entry++;
+ }
++
++ WARN_ON(sd_sysctl_header);
+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+ }
++
++/* may be called multiple times per register */
++static void unregister_sched_domain_sysctl(void)
++{
++ if (sd_sysctl_header)
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++ if (sd_ctl_dir[0].child)
++ sd_free_ctl_entry(&sd_ctl_dir[0].child);
++}
+ #else
+-static void init_sched_domain_sysctl(void)
++static void register_sched_domain_sysctl(void)
++{
++}
++static void unregister_sched_domain_sysctl(void)
+ {
+ }
+ #endif
+
+ /*
+@@ -5401,57 +5578,62 @@ migration_call(struct notifier_block *nf
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
+ kthread_bind(p, cpu);
+ /* Must be high prio: stop_machine expects to yield to it. */
+ rq = task_rq_lock(p, &flags);
+ __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
+ task_rq_unlock(rq, &flags);
+ cpu_rq(cpu)->migration_thread = p;
+ break;
+
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- /* Strictly unneccessary, as first user will wake it. */
++ /* Strictly unnecessary, as first user will wake it. */
+ wake_up_process(cpu_rq(cpu)->migration_thread);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ if (!cpu_rq(cpu)->migration_thread)
+ break;
+- /* Unbind it from offline cpu so it can run. Fall thru. */
++ /* Unbind it from offline cpu so it can run. Fall thru. */
+ kthread_bind(cpu_rq(cpu)->migration_thread,
+ any_online_cpu(cpu_online_map));
+ kthread_stop(cpu_rq(cpu)->migration_thread);
+ cpu_rq(cpu)->migration_thread = NULL;
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
++ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
+ migrate_live_tasks(cpu);
+ rq = cpu_rq(cpu);
+ kthread_stop(rq->migration_thread);
+ rq->migration_thread = NULL;
+ /* Idle task back to normal (off runqueue, low prio) */
+- rq = task_rq_lock(rq->idle, &flags);
++ spin_lock_irq(&rq->lock);
+ update_rq_clock(rq);
+ deactivate_task(rq, rq->idle, 0);
+ rq->idle->static_prio = MAX_PRIO;
+ __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
+ rq->idle->sched_class = &idle_sched_class;
+ migrate_dead_tasks(cpu);
+- task_rq_unlock(rq, &flags);
++ spin_unlock_irq(&rq->lock);
++ cpuset_unlock();
+ migrate_nr_uninterruptible(rq);
+ BUG_ON(rq->nr_running != 0);
+
+- /* No need to migrate the tasks: it was best-effort if
+- * they didn't take sched_hotcpu_mutex. Just wake up
+- * the requestors. */
++ /*
++ * No need to migrate the tasks: it was best-effort if
++ * they didn't take sched_hotcpu_mutex. Just wake up
++ * the requestors.
++ */
+ spin_lock_irq(&rq->lock);
+ while (!list_empty(&rq->migration_queue)) {
+ struct migration_req *req;
+
+ req = list_entry(rq->migration_queue.next,
+@@ -5475,125 +5657,125 @@ migration_call(struct notifier_block *nf
+ static struct notifier_block __cpuinitdata migration_notifier = {
+ .notifier_call = migration_call,
+ .priority = 10
+ };
+
+-int __init migration_init(void)
++void __init migration_init(void)
+ {
+ void *cpu = (void *)(long)smp_processor_id();
+ int err;
+
+ /* Start one for the boot CPU: */
+ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+ BUG_ON(err == NOTIFY_BAD);
+ migration_call(&migration_notifier, CPU_ONLINE, cpu);
+ register_cpu_notifier(&migration_notifier);
+-
+- return 0;
+ }
+ #endif
+
+ #ifdef CONFIG_SMP
+
+ /* Number of possible processor ids */
+ int nr_cpu_ids __read_mostly = NR_CPUS;
+ EXPORT_SYMBOL(nr_cpu_ids);
+
+-#undef SCHED_DOMAIN_DEBUG
+-#ifdef SCHED_DOMAIN_DEBUG
+-static void sched_domain_debug(struct sched_domain *sd, int cpu)
+-{
+- int level = 0;
++#ifdef CONFIG_SCHED_DEBUG
+
+- if (!sd) {
+- printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+- return;
++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
++{
++ struct sched_group *group = sd->groups;
++ cpumask_t groupmask;
++ char str[NR_CPUS];
++
++ cpumask_scnprintf(str, NR_CPUS, sd->span);
++ cpus_clear(groupmask);
++
++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
++
++ if (!(sd->flags & SD_LOAD_BALANCE)) {
++ printk("does not load-balance\n");
++ if (sd->parent)
++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
++ " has parent");
++ return -1;
+ }
+
+- printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
++ printk(KERN_CONT "span %s\n", str);
+
++ if (!cpu_isset(cpu, sd->span)) {
++ printk(KERN_ERR "ERROR: domain->span does not contain "
++ "CPU%d\n", cpu);
++ }
++ if (!cpu_isset(cpu, group->cpumask)) {
++ printk(KERN_ERR "ERROR: domain->groups does not contain"
++ " CPU%d\n", cpu);
++ }
++
++ printk(KERN_DEBUG "%*s groups:", level + 1, "");
+ do {
+- int i;
+- char str[NR_CPUS];
+- struct sched_group *group = sd->groups;
+- cpumask_t groupmask;
+-
+- cpumask_scnprintf(str, NR_CPUS, sd->span);
+- cpus_clear(groupmask);
+-
+- printk(KERN_DEBUG);
+- for (i = 0; i < level + 1; i++)
+- printk(" ");
+- printk("domain %d: ", level);
+-
+- if (!(sd->flags & SD_LOAD_BALANCE)) {
+- printk("does not load-balance\n");
+- if (sd->parent)
+- printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+- " has parent");
++ if (!group) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: group is NULL\n");
+ break;
+ }
+
+- printk("span %s\n", str);
++ if (!group->__cpu_power) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: domain->cpu_power not "
++ "set\n");
++ break;
++ }
+
+- if (!cpu_isset(cpu, sd->span))
+- printk(KERN_ERR "ERROR: domain->span does not contain "
+- "CPU%d\n", cpu);
+- if (!cpu_isset(cpu, group->cpumask))
+- printk(KERN_ERR "ERROR: domain->groups does not contain"
+- " CPU%d\n", cpu);
+-
+- printk(KERN_DEBUG);
+- for (i = 0; i < level + 2; i++)
+- printk(" ");
+- printk("groups:");
+- do {
+- if (!group) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: group is NULL\n");
+- break;
+- }
++ if (!cpus_weight(group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: empty group\n");
++ break;
++ }
+
+- if (!group->__cpu_power) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: domain->cpu_power not "
+- "set\n");
+- }
++ if (cpus_intersects(groupmask, group->cpumask)) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: repeated CPUs\n");
++ break;
++ }
+
+- if (!cpus_weight(group->cpumask)) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: empty group\n");
+- }
++ cpus_or(groupmask, groupmask, group->cpumask);
+
+- if (cpus_intersects(groupmask, group->cpumask)) {
+- printk("\n");
+- printk(KERN_ERR "ERROR: repeated CPUs\n");
+- }
++ cpumask_scnprintf(str, NR_CPUS, group->cpumask);
++ printk(KERN_CONT " %s", str);
++
++ group = group->next;
++ } while (group != sd->groups);
++ printk(KERN_CONT "\n");
++
++ if (!cpus_equal(sd->span, groupmask))
++ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
++
++ if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
++ printk(KERN_ERR "ERROR: parent span is not a superset "
++ "of domain->span\n");
++ return 0;
++}
+
+- cpus_or(groupmask, groupmask, group->cpumask);
++static void sched_domain_debug(struct sched_domain *sd, int cpu)
++{
++ int level = 0;
+
+- cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+- printk(" %s", str);
++ if (!sd) {
++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
++ return;
++ }
+
+- group = group->next;
+- } while (group != sd->groups);
+- printk("\n");
+-
+- if (!cpus_equal(sd->span, groupmask))
+- printk(KERN_ERR "ERROR: groups don't span "
+- "domain->span\n");
++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
++ for (;;) {
++ if (sched_domain_debug_one(sd, cpu, level))
++ break;
+ level++;
+ sd = sd->parent;
+ if (!sd)
+- continue;
+-
+- if (!cpus_subset(groupmask, sd->span))
+- printk(KERN_ERR "ERROR: parent span is not a superset "
+- "of domain->span\n");
+-
+- } while (sd);
++ break;
++ }
+ }
+ #else
+ # define sched_domain_debug(sd, cpu) do { } while (0)
+ #endif
+
+@@ -5698,11 +5880,11 @@ static int __init isolated_cpu_setup(cha
+ if (ints[i] < NR_CPUS)
+ cpu_set(ints[i], cpu_isolated_map);
+ return 1;
+ }
+
+-__setup ("isolcpus=", isolated_cpu_setup);
++__setup("isolcpus=", isolated_cpu_setup);
+
+ /*
+ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
+ * to a function which identifies what group(along with sched group) a CPU
+ * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
+@@ -5755,11 +5937,11 @@ init_sched_build_groups(cpumask_t span,
+ /**
+ * find_next_best_node - find the next node to include in a sched_domain
+ * @node: node whose sched_domain we're building
+ * @used_nodes: nodes already in the sched_domain
+ *
+- * Find the next node to include in a given scheduling domain. Simply
++ * Find the next node to include in a given scheduling domain. Simply
+ * finds the closest node not already in the @used_nodes map.
+ *
+ * Should use nodemask_t.
+ */
+ static int find_next_best_node(int node, unsigned long *used_nodes)
+@@ -5795,11 +5977,11 @@ static int find_next_best_node(int node,
+ /**
+ * sched_domain_node_span - get a cpumask for a node's sched_domain
+ * @node: node whose cpumask we're constructing
+ * @size: number of nodes to include in this span
+ *
+- * Given a node, construct a good cpumask for its sched_domain to span. It
++ * Given a node, construct a good cpumask for its sched_domain to span. It
+ * should be one that prevents unnecessary balancing, but also spreads tasks
+ * out optimally.
+ */
+ static cpumask_t sched_domain_node_span(int node)
+ {
+@@ -5832,12 +6014,12 @@ int sched_smt_power_savings = 0, sched_m
+ */
+ #ifdef CONFIG_SCHED_SMT
+ static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
+
+-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ if (sg)
+ *sg = &per_cpu(sched_group_cpus, cpu);
+ return cpu;
+ }
+@@ -5850,44 +6032,44 @@ static int cpu_to_cpu_group(int cpu, con
+ static DEFINE_PER_CPU(struct sched_domain, core_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
+ #endif
+
+ #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ int group;
+- cpumask_t mask = cpu_sibling_map[cpu];
++ cpumask_t mask = cpu_sibling_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ if (sg)
+ *sg = &per_cpu(sched_group_core, group);
+ return group;
+ }
+ #elif defined(CONFIG_SCHED_MC)
+-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ if (sg)
+ *sg = &per_cpu(sched_group_core, cpu);
+ return cpu;
+ }
+ #endif
+
+ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+ static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
+
+-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
+- struct sched_group **sg)
++static int
++cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+ {
+ int group;
+ #ifdef CONFIG_SCHED_MC
+ cpumask_t mask = cpu_coregroup_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ #elif defined(CONFIG_SCHED_SMT)
+- cpumask_t mask = cpu_sibling_map[cpu];
++ cpumask_t mask = cpu_sibling_map(cpu);
+ cpus_and(mask, mask, *cpu_map);
+ group = first_cpu(mask);
+ #else
+ group = cpu;
+ #endif
+@@ -5927,28 +6109,27 @@ static void init_numa_sched_groups_power
+ struct sched_group *sg = group_head;
+ int j;
+
+ if (!sg)
+ return;
+-next_sg:
+- for_each_cpu_mask(j, sg->cpumask) {
+- struct sched_domain *sd;
++ do {
++ for_each_cpu_mask(j, sg->cpumask) {
++ struct sched_domain *sd;
+
+- sd = &per_cpu(phys_domains, j);
+- if (j != first_cpu(sd->groups->cpumask)) {
+- /*
+- * Only add "power" once for each
+- * physical package.
+- */
+- continue;
+- }
++ sd = &per_cpu(phys_domains, j);
++ if (j != first_cpu(sd->groups->cpumask)) {
++ /*
++ * Only add "power" once for each
++ * physical package.
++ */
++ continue;
++ }
+
+- sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+- }
+- sg = sg->next;
+- if (sg != group_head)
+- goto next_sg;
++ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
++ }
++ sg = sg->next;
++ } while (sg != group_head);
+ }
+ #endif
+
+ #ifdef CONFIG_NUMA
+ /* Free memory allocated for various sched_group structures */
+@@ -6055,12 +6236,12 @@ static int build_sched_domains(const cpu
+ int sd_allnodes = 0;
+
+ /*
+ * Allocate the per-node list of sched groups
+ */
+- sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
+- GFP_KERNEL);
++ sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
++ GFP_KERNEL);
+ if (!sched_group_nodes) {
+ printk(KERN_WARNING "Can not alloc sched group node list\n");
+ return -ENOMEM;
+ }
+ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+@@ -6118,22 +6299,22 @@ static int build_sched_domains(const cpu
+
+ #ifdef CONFIG_SCHED_SMT
+ p = sd;
+ sd = &per_cpu(cpu_domains, i);
+ *sd = SD_SIBLING_INIT;
+- sd->span = cpu_sibling_map[i];
++ sd->span = cpu_sibling_map(i);
+ cpus_and(sd->span, sd->span, *cpu_map);
+ sd->parent = p;
+ p->child = sd;
+ cpu_to_cpu_group(i, cpu_map, &sd->groups);
+ #endif
+ }
+
+ #ifdef CONFIG_SCHED_SMT
+ /* Set up CPU (sibling) groups */
+ for_each_cpu_mask(i, *cpu_map) {
+- cpumask_t this_sibling_map = cpu_sibling_map[i];
++ cpumask_t this_sibling_map = cpu_sibling_map(i);
+ cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
+ if (i != first_cpu(this_sibling_map))
+ continue;
+
+ init_sched_build_groups(this_sibling_map, cpu_map,
+@@ -6291,26 +6472,37 @@ static int build_sched_domains(const cpu
+ error:
+ free_sched_groups(cpu_map);
+ return -ENOMEM;
+ #endif
+ }
++
++static cpumask_t *doms_cur; /* current sched domains */
++static int ndoms_cur; /* number of sched domains in 'doms_cur' */
++
++/*
++ * Special case: If a kmalloc of a doms_cur partition (array of
++ * cpumask_t) fails, then fallback to a single sched domain,
++ * as determined by the single cpumask_t fallback_doms.
++ */
++static cpumask_t fallback_doms;
++
+ /*
+- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * For now this just excludes isolated cpus, but could be used to
++ * exclude other special cases in the future.
+ */
+ static int arch_init_sched_domains(const cpumask_t *cpu_map)
+ {
+- cpumask_t cpu_default_map;
+ int err;
+
+- /*
+- * Setup mask for cpus without special case scheduling requirements.
+- * For now this just excludes isolated cpus, but could be used to
+- * exclude other special cases in the future.
+- */
+- cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+-
+- err = build_sched_domains(&cpu_default_map);
++ ndoms_cur = 1;
++ doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
++ if (!doms_cur)
++ doms_cur = &fallback_doms;
++ cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
++ err = build_sched_domains(doms_cur);
++ register_sched_domain_sysctl();
+
+ return err;
+ }
+
+ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+@@ -6324,41 +6516,83 @@ static void arch_destroy_sched_domains(c
+ */
+ static void detach_destroy_domains(const cpumask_t *cpu_map)
+ {
+ int i;
+
++ unregister_sched_domain_sysctl();
++
+ for_each_cpu_mask(i, *cpu_map)
+ cpu_attach_domain(NULL, i);
+ synchronize_sched();
+ arch_destroy_sched_domains(cpu_map);
+ }
+
+ /*
+- * Partition sched domains as specified by the cpumasks below.
+- * This attaches all cpus from the cpumasks to the NULL domain,
+- * waits for a RCU quiescent period, recalculates sched
+- * domain information and then attaches them back to the
+- * correct sched domains
++ * Partition sched domains as specified by the 'ndoms_new'
++ * cpumasks in the array doms_new[] of cpumasks. This compares
++ * doms_new[] to the current sched domain partitioning, doms_cur[].
++ * It destroys each deleted domain and builds each new domain.
++ *
++ * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
++ * The masks don't intersect (don't overlap.) We should setup one
++ * sched domain for each mask. CPUs not in any of the cpumasks will
++ * not be load balanced. If the same cpumask appears both in the
++ * current 'doms_cur' domains and in the new 'doms_new', we can leave
++ * it as it is.
++ *
++ * The passed in 'doms_new' should be kmalloc'd. This routine takes
++ * ownership of it and will kfree it when done with it. If the caller
++ * failed the kmalloc call, then it can pass in doms_new == NULL,
++ * and partition_sched_domains() will fallback to the single partition
++ * 'fallback_doms'.
++ *
+ * Call with hotplug lock held
+ */
+-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
++void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
+ {
+- cpumask_t change_map;
+- int err = 0;
++ int i, j;
+
+- cpus_and(*partition1, *partition1, cpu_online_map);
+- cpus_and(*partition2, *partition2, cpu_online_map);
+- cpus_or(change_map, *partition1, *partition2);
+-
+- /* Detach sched domains from all of the affected cpus */
+- detach_destroy_domains(&change_map);
+- if (!cpus_empty(*partition1))
+- err = build_sched_domains(partition1);
+- if (!err && !cpus_empty(*partition2))
+- err = build_sched_domains(partition2);
++ /* always unregister in case we don't destroy any domains */
++ unregister_sched_domain_sysctl();
+
+- return err;
++ if (doms_new == NULL) {
++ ndoms_new = 1;
++ doms_new = &fallback_doms;
++ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
++ }
++
++ /* Destroy deleted domains */
++ for (i = 0; i < ndoms_cur; i++) {
++ for (j = 0; j < ndoms_new; j++) {
++ if (cpus_equal(doms_cur[i], doms_new[j]))
++ goto match1;
++ }
++ /* no match - a current sched domain not in new doms_new[] */
++ detach_destroy_domains(doms_cur + i);
++match1:
++ ;
++ }
++
++ /* Build new domains */
++ for (i = 0; i < ndoms_new; i++) {
++ for (j = 0; j < ndoms_cur; j++) {
++ if (cpus_equal(doms_new[i], doms_cur[j]))
++ goto match2;
++ }
++ /* no match - add a new doms_new */
++ build_sched_domains(doms_new + i);
++match2:
++ ;
++ }
++
++ /* Remember the new sched domains */
++ if (doms_cur != &fallback_doms)
++ kfree(doms_cur);
++ doms_cur = doms_new;
++ ndoms_cur = ndoms_new;
++
++ register_sched_domain_sysctl();
+ }
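
For reference, a standalone sketch (not part of the patch) of the matching pass that the new partition_sched_domains() above performs: masks present only in the old partitioning are torn down, masks present only in the new one are built, and masks that appear in both are left alone. A plain unsigned long stands in for cpumask_t here.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long mask_t;	/* stand-in for cpumask_t */

static bool mask_in(mask_t m, const mask_t *set, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (set[i] == m)
			return true;
	return false;
}

static void repartition(const mask_t *cur, int ncur, const mask_t *new, int nnew)
{
	int i;

	for (i = 0; i < ncur; i++)	/* destroy deleted domains */
		if (!mask_in(cur[i], new, nnew))
			printf("destroy 0x%lx\n", cur[i]);

	for (i = 0; i < nnew; i++)	/* build added domains */
		if (!mask_in(new[i], cur, ncur))
			printf("build   0x%lx\n", new[i]);
}

With cur = {0x3, 0xc} and new = {0x3, 0xf0}, only 0xc is destroyed and only 0xf0 is built; the shared 0x3 domain is untouched, which is exactly the property the comment block above describes.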
+
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ static int arch_reinit_sched_domains(void)
+ {
+@@ -6434,11 +6668,11 @@ int sched_create_sysfs_power_savings_ent
+ return err;
+ }
+ #endif
+
+ /*
+- * Force a reinitialization of the sched domains hierarchy. The domains
++ * Force a reinitialization of the sched domains hierarchy. The domains
+ * and groups cannot be updated in place without racing with the balancing
+ * code, so we temporarily attach all running cpus to the NULL domain
+ * which will prevent rebalancing while the sched domains are recalculated.
+ */
+ static int update_sched_domains(struct notifier_block *nfb,
+@@ -6485,12 +6719,10 @@ void __init sched_init_smp(void)
+ cpu_set(smp_processor_id(), non_isolated_cpus);
+ mutex_unlock(&sched_hotcpu_mutex);
+ /* XXX: Theoretical race here - CPU may be hotplugged now */
+ hotcpu_notifier(update_sched_domains, 0);
+
+- init_sched_domain_sysctl();
+-
+ /* Move init over to a non-isolated CPU */
+ if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+ BUG();
+ sched_init_granularity();
+ }
+@@ -6501,40 +6733,29 @@ void __init sched_init_smp(void)
+ }
+ #endif /* CONFIG_SMP */
+
+ int in_sched_functions(unsigned long addr)
+ {
+- /* Linker adds these: start and end of __sched functions */
+- extern char __sched_text_start[], __sched_text_end[];
+-
+ return in_lock_functions(addr) ||
+ (addr >= (unsigned long)__sched_text_start
+ && addr < (unsigned long)__sched_text_end);
+ }
+
+-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
++static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+ {
+ cfs_rq->tasks_timeline = RB_ROOT;
+- cfs_rq->fair_clock = 1;
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ cfs_rq->rq = rq;
+ #endif
++ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+ }
+
+ void __init sched_init(void)
+ {
+- u64 now = sched_clock();
+ int highest_cpu = 0;
+ int i, j;
+
+- /*
+- * Link up the scheduling class hierarchy:
+- */
+- rt_sched_class.next = &fair_sched_class;
+- fair_sched_class.next = &idle_sched_class;
+- idle_sched_class.next = NULL;
+-
+ for_each_possible_cpu(i) {
+ struct rt_prio_array *array;
+ struct rq *rq;
+
+ rq = cpu_rq(i);
+@@ -6543,14 +6764,32 @@ void __init sched_init(void)
+ rq->nr_running = 0;
+ rq->clock = 1;
+ init_cfs_rq(&rq->cfs, rq);
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+- list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
++ {
++ struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
++ struct sched_entity *se =
++ &per_cpu(init_sched_entity, i);
++
++ init_cfs_rq_p[i] = cfs_rq;
++ init_cfs_rq(cfs_rq, rq);
++ cfs_rq->tg = &init_task_group;
++ list_add(&cfs_rq->leaf_cfs_rq_list,
++ &rq->leaf_cfs_rq_list);
++
++ init_sched_entity_p[i] = se;
++ se->cfs_rq = &rq->cfs;
++ se->my_q = cfs_rq;
++ se->load.weight = init_task_group_load;
++ se->load.inv_weight =
++ div64_64(1ULL<<32, init_task_group_load);
++ se->parent = NULL;
++ }
++ init_task_group.shares = init_task_group_load;
++ spin_lock_init(&init_task_group.lock);
+ #endif
+- rq->ls.load_update_last = now;
+- rq->ls.load_update_start = now;
+
+ for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
+ rq->cpu_load[j] = 0;
+ #ifdef CONFIG_SMP
+ rq->sd = NULL;
+@@ -6631,30 +6870,44 @@ void __might_sleep(char *file, int line)
+ }
+ EXPORT_SYMBOL(__might_sleep);
+ #endif
+
+ #ifdef CONFIG_MAGIC_SYSRQ
++static void normalize_task(struct rq *rq, struct task_struct *p)
++{
++ int on_rq;
++ update_rq_clock(rq);
++ on_rq = p->se.on_rq;
++ if (on_rq)
++ deactivate_task(rq, p, 0);
++ __setscheduler(rq, p, SCHED_NORMAL, 0);
++ if (on_rq) {
++ activate_task(rq, p, 0);
++ resched_task(rq->curr);
++ }
++}
++
+ void normalize_rt_tasks(void)
+ {
+ struct task_struct *g, *p;
+ unsigned long flags;
+ struct rq *rq;
+- int on_rq;
+
+ read_lock_irq(&tasklist_lock);
+ do_each_thread(g, p) {
+- p->se.fair_key = 0;
+- p->se.wait_runtime = 0;
++ /*
++ * Only normalize user tasks:
++ */
++ if (!p->mm)
++ continue;
++
+ p->se.exec_start = 0;
+- p->se.wait_start_fair = 0;
+- p->se.sleep_start_fair = 0;
+ #ifdef CONFIG_SCHEDSTATS
+ p->se.wait_start = 0;
+ p->se.sleep_start = 0;
+ p->se.block_start = 0;
+ #endif
+- task_rq(p)->cfs.fair_clock = 0;
+ task_rq(p)->clock = 0;
+
+ if (!rt_task(p)) {
+ /*
+ * Renice negative nice level userspace
+@@ -6665,30 +6918,13 @@ void normalize_rt_tasks(void)
+ continue;
+ }
+
+ spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_rq_lock(p);
+-#ifdef CONFIG_SMP
+- /*
+- * Do not touch the migration thread:
+- */
+- if (p == rq->migration_thread)
+- goto out_unlock;
+-#endif
+
+- update_rq_clock(rq);
+- on_rq = p->se.on_rq;
+- if (on_rq)
+- deactivate_task(rq, p, 0);
+- __setscheduler(rq, p, SCHED_NORMAL, 0);
+- if (on_rq) {
+- activate_task(rq, p, 0);
+- resched_task(rq->curr);
+- }
+-#ifdef CONFIG_SMP
+- out_unlock:
+-#endif
++ normalize_task(rq, p);
++
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+ } while_each_thread(g, p);
+
+ read_unlock_irq(&tasklist_lock);
+@@ -6722,12 +6958,12 @@ struct task_struct *curr_task(int cpu)
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+- * are serviced on a separate stack. It allows the architecture to switch the
+- * notion of the current task on a cpu in a non-blocking manner. This function
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a cpu in a non-blocking manner. This function
+ * must be called with all CPU's synchronized, and interrupts disabled, the
+ * and caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before reenabling interrupts and
+ * re-starting the system.
+ *
+@@ -6737,5 +6973,427 @@ void set_curr_task(int cpu, struct task_
+ {
+ cpu_curr(cpu) = p;
+ }
+
+ #endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(void)
++{
++ struct task_group *tg;
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *se;
++ struct rq *rq;
++ int i;
++
++ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
++ if (!tg->cfs_rq)
++ goto err;
++ tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
++ if (!tg->se)
++ goto err;
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++
++ cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL,
++ cpu_to_node(i));
++ if (!cfs_rq)
++ goto err;
++
++ se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL,
++ cpu_to_node(i));
++ if (!se)
++ goto err;
++
++ memset(cfs_rq, 0, sizeof(struct cfs_rq));
++ memset(se, 0, sizeof(struct sched_entity));
++
++ tg->cfs_rq[i] = cfs_rq;
++ init_cfs_rq(cfs_rq, rq);
++ cfs_rq->tg = tg;
++
++ tg->se[i] = se;
++ se->cfs_rq = &rq->cfs;
++ se->my_q = cfs_rq;
++ se->load.weight = NICE_0_LOAD;
++ se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD);
++ se->parent = NULL;
++ }
++
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++ cfs_rq = tg->cfs_rq[i];
++ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
++ }
++
++ tg->shares = NICE_0_LOAD;
++ spin_lock_init(&tg->lock);
++
++ return tg;
++
++err:
++ for_each_possible_cpu(i) {
++ if (tg->cfs_rq)
++ kfree(tg->cfs_rq[i]);
++ if (tg->se)
++ kfree(tg->se[i]);
++ }
++ kfree(tg->cfs_rq);
++ kfree(tg->se);
++ kfree(tg);
++
++ return ERR_PTR(-ENOMEM);
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void free_sched_group(struct rcu_head *rhp)
++{
++ struct task_group *tg = container_of(rhp, struct task_group, rcu);
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *se;
++ int i;
++
++ /* now it should be safe to free those cfs_rqs */
++ for_each_possible_cpu(i) {
++ cfs_rq = tg->cfs_rq[i];
++ kfree(cfs_rq);
++
++ se = tg->se[i];
++ kfree(se);
++ }
++
++ kfree(tg->cfs_rq);
++ kfree(tg->se);
++ kfree(tg);
++}
++
++/* Destroy runqueue etc associated with a task group */
++void sched_destroy_group(struct task_group *tg)
++{
++ struct cfs_rq *cfs_rq = NULL;
++ int i;
++
++ for_each_possible_cpu(i) {
++ cfs_rq = tg->cfs_rq[i];
++ list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
++ }
++
++ BUG_ON(!cfs_rq);
++
++ /* wait for possible concurrent references to cfs_rqs complete */
++ call_rcu(&tg->rcu, free_sched_group);
++}
++
++/* change task's runqueue when it moves between groups.
++ * The caller of this function should have put the task in its new group
++ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
++ * reflect its new group.
++ */
++void sched_move_task(struct task_struct *tsk)
++{
++ int on_rq, running;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(tsk, &flags);
++
++ if (tsk->sched_class != &fair_sched_class) {
++ set_task_cfs_rq(tsk, task_cpu(tsk));
++ goto done;
++ }
++
++ update_rq_clock(rq);
++
++ running = task_current(rq, tsk);
++ on_rq = tsk->se.on_rq;
++
++ if (on_rq) {
++ dequeue_task(rq, tsk, 0);
++ if (unlikely(running))
++ tsk->sched_class->put_prev_task(rq, tsk);
++ }
++
++ set_task_cfs_rq(tsk, task_cpu(tsk));
++
++ if (on_rq) {
++ if (unlikely(running))
++ tsk->sched_class->set_curr_task(rq);
++ enqueue_task(rq, tsk, 0);
++ }
++
++done:
++ task_rq_unlock(rq, &flags);
++}
++
++static void set_se_shares(struct sched_entity *se, unsigned long shares)
++{
++ struct cfs_rq *cfs_rq = se->cfs_rq;
++ struct rq *rq = cfs_rq->rq;
++ int on_rq;
++
++ spin_lock_irq(&rq->lock);
++
++ on_rq = se->on_rq;
++ if (on_rq)
++ dequeue_entity(cfs_rq, se, 0);
++
++ se->load.weight = shares;
++ se->load.inv_weight = div64_64((1ULL<<32), shares);
++
++ if (on_rq)
++ enqueue_entity(cfs_rq, se, 0);
++
++ spin_unlock_irq(&rq->lock);
++}
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++ int i;
++
++ spin_lock(&tg->lock);
++ if (tg->shares == shares)
++ goto done;
++
++ tg->shares = shares;
++ for_each_possible_cpu(i)
++ set_se_shares(tg->se[i], shares);
++
++done:
++ spin_unlock(&tg->lock);
++ return 0;
++}
++
++unsigned long sched_group_shares(struct task_group *tg)
++{
++ return tg->shares;
++}
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_FAIR_CGROUP_SCHED
++
++/* return corresponding task_group object of a cgroup */
++static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
++{
++ return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
++ struct task_group, css);
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg;
++
++ if (!cgrp->parent) {
++ /* This is early initialization for the top cgroup */
++ init_task_group.css.cgroup = cgrp;
++ return &init_task_group.css;
++ }
++
++ /* we support only 1-level deep hierarchical scheduler atm */
++ if (cgrp->parent->parent)
++ return ERR_PTR(-EINVAL);
++
++ tg = sched_create_group();
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++
++ /* Bind the cgroup to task_group object we just created */
++ tg->css.cgroup = cgrp;
++
++ return &tg->css;
++}
++
++static void
++cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ sched_destroy_group(tg);
++}
++
++static int
++cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct task_struct *tsk)
++{
++ /* We don't support RT-tasks being in separate groups */
++ if (tsk->sched_class != &fair_sched_class)
++ return -EINVAL;
++
++ return 0;
++}
++
++static void
++cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
++ struct cgroup *old_cont, struct task_struct *tsk)
++{
++ sched_move_task(tsk);
++}
++
++static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
++ u64 shareval)
++{
++ return sched_group_set_shares(cgroup_tg(cgrp), shareval);
++}
++
++static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
++{
++ struct task_group *tg = cgroup_tg(cgrp);
++
++ return (u64) tg->shares;
++}
++
++static struct cftype cpu_files[] = {
++ {
++ .name = "shares",
++ .read_uint = cpu_shares_read_uint,
++ .write_uint = cpu_shares_write_uint,
++ },
++};
++
++static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
++}
++
++struct cgroup_subsys cpu_cgroup_subsys = {
++ .name = "cpu",
++ .create = cpu_cgroup_create,
++ .destroy = cpu_cgroup_destroy,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .populate = cpu_cgroup_populate,
++ .subsys_id = cpu_cgroup_subsys_id,
++ .early_init = 1,
++};
++
++#endif /* CONFIG_FAIR_CGROUP_SCHED */
++
++#ifdef CONFIG_CGROUP_CPUACCT
++
++/*
++ * CPU accounting code for task groups.
++ *
++ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
++ * (balbir@in.ibm.com).
++ */
++
++/* track cpu usage of a group of tasks */
++struct cpuacct {
++ struct cgroup_subsys_state css;
++ /* cpuusage holds pointer to a u64-type object on every cpu */
++ u64 *cpuusage;
++};
++
++struct cgroup_subsys cpuacct_subsys;
++
++/* return cpu accounting group corresponding to this container */
++static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
++{
++ return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* return cpu accounting group to which this task belongs */
++static inline struct cpuacct *task_ca(struct task_struct *tsk)
++{
++ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
++ struct cpuacct, css);
++}
++
++/* create a new cpu accounting group */
++static struct cgroup_subsys_state *cpuacct_create(
++ struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
++
++ if (!ca)
++ return ERR_PTR(-ENOMEM);
++
++ ca->cpuusage = alloc_percpu(u64);
++ if (!ca->cpuusage) {
++ kfree(ca);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return &ca->css;
++}
++
++/* destroy an existing cpu accounting group */
++static void
++cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ struct cpuacct *ca = cgroup_ca(cont);
++
++ free_percpu(ca->cpuusage);
++ kfree(ca);
++}
++
++/* return total cpu usage (in nanoseconds) of a group */
++static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
++{
++ struct cpuacct *ca = cgroup_ca(cont);
++ u64 totalcpuusage = 0;
++ int i;
++
++ for_each_possible_cpu(i) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
++
++ /*
++ * Take rq->lock to make 64-bit addition safe on 32-bit
++ * platforms.
++ */
++ spin_lock_irq(&cpu_rq(i)->lock);
++ totalcpuusage += *cpuusage;
++ spin_unlock_irq(&cpu_rq(i)->lock);
++ }
++
++ return totalcpuusage;
++}
++
++static struct cftype files[] = {
++ {
++ .name = "usage",
++ .read_uint = cpuusage_read,
++ },
++};
++
++static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
++{
++ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
++}
++
++/*
++ * charge this task's execution time to its accounting group.
++ *
++ * called with rq->lock held.
++ */
++static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
++{
++ struct cpuacct *ca;
++
++ if (!cpuacct_subsys.active)
++ return;
++
++ ca = task_ca(tsk);
++ if (ca) {
++ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
++
++ *cpuusage += cputime;
++ }
++}
++
++struct cgroup_subsys cpuacct_subsys = {
++ .name = "cpuacct",
++ .create = cpuacct_create,
++ .destroy = cpuacct_destroy,
++ .populate = cpuacct_populate,
++ .subsys_id = cpuacct_subsys_id,
++};
++#endif /* CONFIG_CGROUP_CPUACCT */
+--- linux-2.6.23.orig/kernel/sched_debug.c
++++ linux-2.6.23/kernel/sched_debug.c
+@@ -26,104 +26,125 @@
+ seq_printf(m, x); \
+ else \
+ printk(x); \
+ } while (0)
+
++/*
++ * Ease the printing of nsec fields:
++ */
++static long long nsec_high(long long nsec)
++{
++ if (nsec < 0) {
++ nsec = -nsec;
++ do_div(nsec, 1000000);
++ return -nsec;
++ }
++ do_div(nsec, 1000000);
++
++ return nsec;
++}
++
++static unsigned long nsec_low(long long nsec)
++{
++ if (nsec < 0)
++ nsec = -nsec;
++
++ return do_div(nsec, 1000000);
++}
++
++#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
++
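
The two helpers above let a nanosecond quantity be printed as milliseconds with six fractional digits through a pair of printf arguments: nsec_high() yields the whole milliseconds (preserving the sign) and nsec_low() the remainder. A standalone sketch of the same formatting, not code from the patch:

#include <stdio.h>

int main(void)
{
	long long nsec = 1234567890LL;	/* prints as 1234.567890 msecs */

	/* mirrors the %Ld.%06ld format used together with SPLIT_NS() */
	printf("%lld.%06lld msecs\n", nsec / 1000000, nsec % 1000000);
	return 0;
}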
+ static void
+ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ {
+ if (rq->curr == p)
+ SEQ_printf(m, "R");
+ else
+ SEQ_printf(m, " ");
+
+- SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
++ SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
+ p->comm, p->pid,
+- (long long)p->se.fair_key,
+- (long long)(p->se.fair_key - rq->cfs.fair_clock),
+- (long long)p->se.wait_runtime,
++ SPLIT_NS(p->se.vruntime),
+ (long long)(p->nvcsw + p->nivcsw),
+ p->prio);
+ #ifdef CONFIG_SCHEDSTATS
+- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+- (long long)p->se.sum_exec_runtime,
+- (long long)p->se.sum_wait_runtime,
+- (long long)p->se.sum_sleep_runtime,
+- (long long)p->se.wait_runtime_overruns,
+- (long long)p->se.wait_runtime_underruns);
++ SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
++ SPLIT_NS(p->se.vruntime),
++ SPLIT_NS(p->se.sum_exec_runtime),
++ SPLIT_NS(p->se.sum_sleep_runtime));
+ #else
+- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+- 0LL, 0LL, 0LL, 0LL, 0LL);
++ SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
++ 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
+ #endif
+ }
+
+ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+ {
+ struct task_struct *g, *p;
++ unsigned long flags;
+
+ SEQ_printf(m,
+ "\nrunnable tasks:\n"
+- " task PID tree-key delta waiting"
+- " switches prio"
+- " sum-exec sum-wait sum-sleep"
+- " wait-overrun wait-underrun\n"
+- "------------------------------------------------------------------"
+- "----------------"
+- "------------------------------------------------"
+- "--------------------------------\n");
++ " task PID tree-key switches prio"
++ " exec-runtime sum-exec sum-sleep\n"
++ "------------------------------------------------------"
++ "----------------------------------------------------\n");
+
+- read_lock_irq(&tasklist_lock);
++ read_lock_irqsave(&tasklist_lock, flags);
+
+ do_each_thread(g, p) {
+ if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+ continue;
+
+ print_task(m, rq, p);
+ } while_each_thread(g, p);
+
+- read_unlock_irq(&tasklist_lock);
++ read_unlock_irqrestore(&tasklist_lock, flags);
+ }
+
+-static void
+-print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
++void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ {
+- s64 wait_runtime_rq_sum = 0;
+- struct task_struct *p;
+- struct rb_node *curr;
+- unsigned long flags;
++ s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
++ spread, rq0_min_vruntime, spread0;
+ struct rq *rq = &per_cpu(runqueues, cpu);
++ struct sched_entity *last;
++ unsigned long flags;
+
+- spin_lock_irqsave(&rq->lock, flags);
+- curr = first_fair(cfs_rq);
+- while (curr) {
+- p = rb_entry(curr, struct task_struct, se.run_node);
+- wait_runtime_rq_sum += p->se.wait_runtime;
+-
+- curr = rb_next(curr);
+- }
+- spin_unlock_irqrestore(&rq->lock, flags);
+-
+- SEQ_printf(m, " .%-30s: %Ld\n", "wait_runtime_rq_sum",
+- (long long)wait_runtime_rq_sum);
+-}
+-
+-void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+-{
+ SEQ_printf(m, "\ncfs_rq\n");
+
+-#define P(x) \
+- SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
+-
+- P(fair_clock);
+- P(exec_clock);
+- P(wait_runtime);
+- P(wait_runtime_overruns);
+- P(wait_runtime_underruns);
+- P(sleeper_bonus);
+-#undef P
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
++ SPLIT_NS(cfs_rq->exec_clock));
+
+- print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
++ spin_lock_irqsave(&rq->lock, flags);
++ if (cfs_rq->rb_leftmost)
++ MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
++ last = __pick_last_entity(cfs_rq);
++ if (last)
++ max_vruntime = last->vruntime;
++ min_vruntime = rq->cfs.min_vruntime;
++ rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
++ spin_unlock_irqrestore(&rq->lock, flags);
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
++ SPLIT_NS(MIN_vruntime));
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
++ SPLIT_NS(min_vruntime));
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
++ SPLIT_NS(max_vruntime));
++ spread = max_vruntime - MIN_vruntime;
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
++ SPLIT_NS(spread));
++ spread0 = min_vruntime - rq0_min_vruntime;
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
++ SPLIT_NS(spread0));
++ SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
++ SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
++#ifdef CONFIG_SCHEDSTATS
++ SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
++ rq->bkl_count);
++#endif
++ SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
++ cfs_rq->nr_spread_over);
+ }
+
+ static void print_cpu(struct seq_file *m, int cpu)
+ {
+ struct rq *rq = &per_cpu(runqueues, cpu);
+@@ -139,35 +160,36 @@ static void print_cpu(struct seq_file *m
+ SEQ_printf(m, "\ncpu#%d\n", cpu);
+ #endif
+
+ #define P(x) \
+ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))
++#define PN(x) \
++ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
+
+ P(nr_running);
+ SEQ_printf(m, " .%-30s: %lu\n", "load",
+- rq->ls.load.weight);
+- P(ls.delta_fair);
+- P(ls.delta_exec);
++ rq->load.weight);
+ P(nr_switches);
+ P(nr_load_updates);
+ P(nr_uninterruptible);
+ SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
+- P(next_balance);
++ PN(next_balance);
+ P(curr->pid);
+- P(clock);
+- P(idle_clock);
+- P(prev_clock_raw);
++ PN(clock);
++ PN(idle_clock);
++ PN(prev_clock_raw);
+ P(clock_warps);
+ P(clock_overflows);
+ P(clock_deep_idle_events);
+- P(clock_max_delta);
++ PN(clock_max_delta);
+ P(cpu_load[0]);
+ P(cpu_load[1]);
+ P(cpu_load[2]);
+ P(cpu_load[3]);
+ P(cpu_load[4]);
+ #undef P
++#undef PN
+
+ print_cfs_stats(m, cpu);
+
+ print_rq(m, rq, cpu);
+ }
+@@ -175,16 +197,29 @@ static void print_cpu(struct seq_file *m
+ static int sched_debug_show(struct seq_file *m, void *v)
+ {
+ u64 now = ktime_to_ns(ktime_get());
+ int cpu;
+
+- SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
++ SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+- SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
++ SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));
++
++#define P(x) \
++ SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
++#define PN(x) \
++ SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
++ PN(sysctl_sched_latency);
++ PN(sysctl_sched_min_granularity);
++ PN(sysctl_sched_wakeup_granularity);
++ PN(sysctl_sched_batch_wakeup_granularity);
++ PN(sysctl_sched_child_runs_first);
++ P(sysctl_sched_features);
++#undef PN
++#undef P
+
+ for_each_online_cpu(cpu)
+ print_cpu(m, cpu);
+
+ SEQ_printf(m, "\n");
+@@ -200,11 +235,11 @@ static void sysrq_sched_debug_show(void)
+ static int sched_debug_open(struct inode *inode, struct file *filp)
+ {
+ return single_open(filp, sched_debug_show, NULL);
+ }
+
+-static struct file_operations sched_debug_fops = {
++static const struct file_operations sched_debug_fops = {
+ .open = sched_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+@@ -224,10 +259,11 @@ static int __init init_sched_debug_procf
+
+ __initcall(init_sched_debug_procfs);
+
+ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ {
++ unsigned long nr_switches;
+ unsigned long flags;
+ int num_threads = 1;
+
+ rcu_read_lock();
+ if (lock_task_sighand(p, &flags)) {
+@@ -235,53 +271,126 @@ void proc_sched_show_task(struct task_st
+ unlock_task_sighand(p, &flags);
+ }
+ rcu_read_unlock();
+
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+- SEQ_printf(m, "----------------------------------------------\n");
++ SEQ_printf(m,
++ "---------------------------------------------------------\n");
++#define __P(F) \
++ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
+ #define P(F) \
+- SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
++ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
++#define __PN(F) \
++ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
++#define PN(F) \
++ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
++
++ PN(se.exec_start);
++ PN(se.vruntime);
++ PN(se.sum_exec_runtime);
+
+- P(se.wait_runtime);
+- P(se.wait_start_fair);
+- P(se.exec_start);
+- P(se.sleep_start_fair);
+- P(se.sum_exec_runtime);
++ nr_switches = p->nvcsw + p->nivcsw;
+
+ #ifdef CONFIG_SCHEDSTATS
+- P(se.wait_start);
+- P(se.sleep_start);
+- P(se.block_start);
+- P(se.sleep_max);
+- P(se.block_max);
+- P(se.exec_max);
+- P(se.wait_max);
+- P(se.wait_runtime_overruns);
+- P(se.wait_runtime_underruns);
+- P(se.sum_wait_runtime);
++ PN(se.wait_start);
++ PN(se.sleep_start);
++ PN(se.block_start);
++ PN(se.sleep_max);
++ PN(se.block_max);
++ PN(se.exec_max);
++ PN(se.slice_max);
++ PN(se.wait_max);
++ P(sched_info.bkl_count);
++ P(se.nr_migrations);
++ P(se.nr_migrations_cold);
++ P(se.nr_failed_migrations_affine);
++ P(se.nr_failed_migrations_running);
++ P(se.nr_failed_migrations_hot);
++ P(se.nr_forced_migrations);
++ P(se.nr_forced2_migrations);
++ P(se.nr_wakeups);
++ P(se.nr_wakeups_sync);
++ P(se.nr_wakeups_migrate);
++ P(se.nr_wakeups_local);
++ P(se.nr_wakeups_remote);
++ P(se.nr_wakeups_affine);
++ P(se.nr_wakeups_affine_attempts);
++ P(se.nr_wakeups_passive);
++ P(se.nr_wakeups_idle);
++
++ {
++ u64 avg_atom, avg_per_cpu;
++
++ avg_atom = p->se.sum_exec_runtime;
++ if (nr_switches)
++ do_div(avg_atom, nr_switches);
++ else
++ avg_atom = -1LL;
++
++ avg_per_cpu = p->se.sum_exec_runtime;
++ if (p->se.nr_migrations) {
++ avg_per_cpu = div64_64(avg_per_cpu,
++ p->se.nr_migrations);
++ } else {
++ avg_per_cpu = -1LL;
++ }
++
++ __PN(avg_atom);
++ __PN(avg_per_cpu);
++ }
+ #endif
+- SEQ_printf(m, "%-25s:%20Ld\n",
+- "nr_switches", (long long)(p->nvcsw + p->nivcsw));
++ __P(nr_switches);
++ SEQ_printf(m, "%-35s:%21Ld\n",
++ "nr_voluntary_switches", (long long)p->nvcsw);
++ SEQ_printf(m, "%-35s:%21Ld\n",
++ "nr_involuntary_switches", (long long)p->nivcsw);
++
+ P(se.load.weight);
+ P(policy);
+ P(prio);
++#undef PN
++#undef __PN
+ #undef P
++#undef __P
+
+ {
+ u64 t0, t1;
+
+ t0 = sched_clock();
+ t1 = sched_clock();
+- SEQ_printf(m, "%-25s:%20Ld\n",
++ SEQ_printf(m, "%-35s:%21Ld\n",
+ "clock-delta", (long long)(t1-t0));
+ }
+ }
+
+ void proc_sched_set_task(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHEDSTATS
+- p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
+- p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
++ p->se.wait_max = 0;
++ p->se.sleep_max = 0;
++ p->se.sum_sleep_runtime = 0;
++ p->se.block_max = 0;
++ p->se.exec_max = 0;
++ p->se.slice_max = 0;
++ p->se.nr_migrations = 0;
++ p->se.nr_migrations_cold = 0;
++ p->se.nr_failed_migrations_affine = 0;
++ p->se.nr_failed_migrations_running = 0;
++ p->se.nr_failed_migrations_hot = 0;
++ p->se.nr_forced_migrations = 0;
++ p->se.nr_forced2_migrations = 0;
++ p->se.nr_wakeups = 0;
++ p->se.nr_wakeups_sync = 0;
++ p->se.nr_wakeups_migrate = 0;
++ p->se.nr_wakeups_local = 0;
++ p->se.nr_wakeups_remote = 0;
++ p->se.nr_wakeups_affine = 0;
++ p->se.nr_wakeups_affine_attempts = 0;
++ p->se.nr_wakeups_passive = 0;
++ p->se.nr_wakeups_idle = 0;
++ p->sched_info.bkl_count = 0;
+ #endif
+- p->se.sum_exec_runtime = 0;
+- p->se.prev_sum_exec_runtime = 0;
++ p->se.sum_exec_runtime = 0;
++ p->se.prev_sum_exec_runtime = 0;
++ p->nvcsw = 0;
++ p->nivcsw = 0;
+ }
+--- linux-2.6.23.orig/kernel/sched_fair.c
++++ linux-2.6.23/kernel/sched_fair.c
+@@ -20,29 +20,38 @@
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+ /*
+ * Targeted preemption latency for CPU-bound tasks:
+- * (default: 20ms, units: nanoseconds)
++ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * NOTE: this latency value is not the same as the concept of
+- * 'timeslice length' - timeslices in CFS are of variable length.
+- * (to see the precise effective timeslice length of your workload,
+- * run vmstat and monitor the context-switches field)
++ * 'timeslice length' - timeslices in CFS are of variable length
++ * and have no persistent notion like in traditional, time-slice
++ * based scheduling concepts.
+ *
+- * On SMP systems the value of this is multiplied by the log2 of the
+- * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
+- * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
+- * Targeted preemption latency for CPU-bound tasks:
++ * (to see the precise effective timeslice length of your workload,
++ * run vmstat and monitor the context-switches (cs) field)
+ */
+-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
++unsigned int sysctl_sched_latency = 20000000ULL;
+
+ /*
+ * Minimal preemption granularity for CPU-bound tasks:
+- * (default: 2 msec, units: nanoseconds)
++ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ */
++unsigned int sysctl_sched_min_granularity = 4000000ULL;
++
++/*
++ * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
+ */
+-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
++static unsigned int sched_nr_latency = 5;
++
++/*
++ * After fork, child runs first. (default) If set to 0 then
++ * parent will (try to) run first.
++ */
++const_debug unsigned int sysctl_sched_child_runs_first = 1;
+
+ /*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the agressive yield implementation of the
+@@ -50,56 +59,29 @@ unsigned int sysctl_sched_min_granularit
+ */
+ unsigned int __read_mostly sysctl_sched_compat_yield;
+
+ /*
+ * SCHED_BATCH wake-up granularity.
+- * (default: 25 msec, units: nanoseconds)
++ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
++unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
+
+ /*
+ * SCHED_OTHER wake-up granularity.
+- * (default: 1 msec, units: nanoseconds)
++ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+-
+-unsigned int sysctl_sched_stat_granularity __read_mostly;
+-
+-/*
+- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
+- */
+-unsigned int sysctl_sched_runtime_limit __read_mostly;
+-
+-/*
+- * Debugging: various feature bits
+- */
+-enum {
+- SCHED_FEAT_FAIR_SLEEPERS = 1,
+- SCHED_FEAT_SLEEPER_AVG = 2,
+- SCHED_FEAT_SLEEPER_LOAD_AVG = 4,
+- SCHED_FEAT_PRECISE_CPU_LOAD = 8,
+- SCHED_FEAT_START_DEBIT = 16,
+- SCHED_FEAT_SKIP_INITIAL = 32,
+-};
+-
+-unsigned int sysctl_sched_features __read_mostly =
+- SCHED_FEAT_FAIR_SLEEPERS *1 |
+- SCHED_FEAT_SLEEPER_AVG *0 |
+- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
+- SCHED_FEAT_PRECISE_CPU_LOAD *0 |
+- SCHED_FEAT_START_DEBIT *1 |
+- SCHED_FEAT_SKIP_INITIAL *0;
++unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+
+-extern struct sched_class fair_sched_class;
++const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
+ /**************************************************************
+ * CFS operations on generic schedulable entities:
+ */
+
+@@ -109,47 +91,22 @@ extern struct sched_class fair_sched_cla
+ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+ {
+ return cfs_rq->rq;
+ }
+
+-/* currently running entity (if any) on this cfs_rq */
+-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+-{
+- return cfs_rq->curr;
+-}
+-
+ /* An entity is a task if it doesn't "own" a runqueue */
+ #define entity_is_task(se) (!se->my_q)
+
+-static inline void
+-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- cfs_rq->curr = se;
+-}
+-
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+
+ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+ {
+ return container_of(cfs_rq, struct rq, cfs);
+ }
+
+-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+-{
+- struct rq *rq = rq_of(cfs_rq);
+-
+- if (unlikely(rq->curr->sched_class != &fair_sched_class))
+- return NULL;
+-
+- return &rq->curr->se;
+-}
+-
+ #define entity_is_task(se) 1
+
+-static inline void
+-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+-
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ static inline struct task_struct *task_of(struct sched_entity *se)
+ {
+ return container_of(se, struct task_struct, se);
+@@ -158,20 +115,42 @@ static inline struct task_struct *task_o
+
+ /**************************************************************
+ * Scheduling class tree data structure manipulation methods:
+ */
+
++static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
++{
++ s64 delta = (s64)(vruntime - min_vruntime);
++ if (delta > 0)
++ min_vruntime = vruntime;
++
++ return min_vruntime;
++}
++
++static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
++{
++ s64 delta = (s64)(vruntime - min_vruntime);
++ if (delta < 0)
++ min_vruntime = vruntime;
++
++ return min_vruntime;
++}
++
++static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++ return se->vruntime - cfs_rq->min_vruntime;
++}
++
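
max_vruntime()/min_vruntime() above compare through a signed delta rather than comparing the u64 values directly, so the result stays correct even if vruntime wraps around 64 bits: the signed difference of two nearby values is small across the wrap. A minimal standalone check of that property (illustration only):

#include <assert.h>
#include <stdint.h>

static uint64_t max_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - min_vruntime);

	if (delta > 0)
		min_vruntime = vruntime;
	return min_vruntime;
}

int main(void)
{
	/* b is logically later than a even though it wrapped past 2^64 */
	uint64_t a = UINT64_MAX - 5, b = a + 10;

	assert(max_vruntime(a, b) == b);	/* signed delta sees b as ahead */
	assert(b < a);				/* a raw u64 compare would not  */
	return 0;
}

entity_key() keys the rb-tree on vruntime relative to cfs_rq->min_vruntime for the same reason.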
+ /*
+ * Enqueue an entity into the rb-tree:
+ */
+-static inline void
+-__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct rb_node *parent = NULL;
+ struct sched_entity *entry;
+- s64 key = se->fair_key;
++ s64 key = entity_key(cfs_rq, se);
+ int leftmost = 1;
+
+ /*
+ * Find the right place in the rbtree:
+ */
+@@ -180,11 +159,11 @@ __enqueue_entity(struct cfs_rq *cfs_rq,
+ entry = rb_entry(parent, struct sched_entity, run_node);
+ /*
+ * We dont care about collisions. Nodes with
+ * the same key stay together.
+ */
+- if (key - entry->fair_key < 0) {
++ if (key < entity_key(cfs_rq, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = 0;
+ }
+@@ -197,28 +176,18 @@ __enqueue_entity(struct cfs_rq *cfs_rq,
+ if (leftmost)
+ cfs_rq->rb_leftmost = &se->run_node;
+
+ rb_link_node(&se->run_node, parent, link);
+ rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+- update_load_add(&cfs_rq->load, se->load.weight);
+- cfs_rq->nr_running++;
+- se->on_rq = 1;
+-
+- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+ }
+
+-static inline void
+-__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ if (cfs_rq->rb_leftmost == &se->run_node)
+ cfs_rq->rb_leftmost = rb_next(&se->run_node);
+- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+- update_load_sub(&cfs_rq->load, se->load.weight);
+- cfs_rq->nr_running--;
+- se->on_rq = 0;
+
+- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
++ rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+ }
+
+ static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
+ {
+ return cfs_rq->rb_leftmost;
+@@ -227,308 +196,206 @@ static inline struct rb_node *first_fair
+ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+ {
+ return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+ }
+
++static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
++{
++ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
++ struct sched_entity *se = NULL;
++ struct rb_node *parent;
++
++ while (*link) {
++ parent = *link;
++ se = rb_entry(parent, struct sched_entity, run_node);
++ link = &parent->rb_right;
++ }
++
++ return se;
++}
++
+ /**************************************************************
+ * Scheduling class statistics methods:
+ */
+
++#ifdef CONFIG_SCHED_DEBUG
++int sched_nr_latency_handler(struct ctl_table *table, int write,
++ struct file *filp, void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
++
++ if (ret || !write)
++ return ret;
++
++ sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
++ sysctl_sched_min_granularity);
++
++ return 0;
++}
++#endif
++
+ /*
+- * Calculate the preemption granularity needed to schedule every
+- * runnable task once per sysctl_sched_latency amount of time.
+- * (down to a sensible low limit on granularity)
+- *
+- * For example, if there are 2 tasks running and latency is 10 msecs,
+- * we switch tasks every 5 msecs. If we have 3 tasks running, we have
+- * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
+- * for each task. We do finer and finer scheduling up to until we
+- * reach the minimum granularity value.
+- *
+- * To achieve this we use the following dynamic-granularity rule:
+- *
+- * gran = lat/nr - lat/nr/nr
++ * The idea is to set a period in which each task runs once.
+ *
+- * This comes out of the following equations:
++ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
++ * this period because otherwise the slices get too small.
+ *
+- * kA1 + gran = kB1
+- * kB2 + gran = kA2
+- * kA2 = kA1
+- * kB2 = kB1 - d + d/nr
+- * lat = d * nr
+- *
+- * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
+- * '1' is start of time, '2' is end of time, 'd' is delay between
+- * 1 and 2 (during which task B was running), 'nr' is number of tasks
+- * running, 'lat' is the the period of each task. ('lat' is the
+- * sched_latency that we aim for.)
++ * p = (nr <= nl) ? l : l*nr/nl
+ */
+-static long
+-sched_granularity(struct cfs_rq *cfs_rq)
++static u64 __sched_period(unsigned long nr_running)
+ {
+- unsigned int gran = sysctl_sched_latency;
+- unsigned int nr = cfs_rq->nr_running;
++ u64 period = sysctl_sched_latency;
++ unsigned long nr_latency = sched_nr_latency;
+
+- if (nr > 1) {
+- gran = gran/nr - gran/nr/nr;
+- gran = max(gran, sysctl_sched_min_granularity);
++ if (unlikely(nr_running > nr_latency)) {
++ period *= nr_running;
++ do_div(period, nr_latency);
+ }
+
+- return gran;
++ return period;
+ }
+
+ /*
+- * We rescale the rescheduling granularity of tasks according to their
+- * nice level, but only linearly, not exponentially:
++ * We calculate the wall-time slice from the period by taking a part
++ * proportional to the weight.
++ *
++ * s = p*w/rw
+ */
+-static long
+-niced_granularity(struct sched_entity *curr, unsigned long granularity)
++static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- u64 tmp;
++ u64 slice = __sched_period(cfs_rq->nr_running);
+
+- if (likely(curr->load.weight == NICE_0_LOAD))
+- return granularity;
+- /*
+- * Positive nice levels get the same granularity as nice-0:
+- */
+- if (likely(curr->load.weight < NICE_0_LOAD)) {
+- tmp = curr->load.weight * (u64)granularity;
+- return (long) (tmp >> NICE_0_SHIFT);
+- }
+- /*
+- * Negative nice level tasks get linearly finer
+- * granularity:
+- */
+- tmp = curr->load.inv_weight * (u64)granularity;
++ slice *= se->load.weight;
++ do_div(slice, cfs_rq->load.weight);
+
+- /*
+- * It will always fit into 'long':
+- */
+- return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
++ return slice;
+ }
+
+-static inline void
+-limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
++/*
++ * We calculate the vruntime slice.
++ *
++ * vs = s/w = p/rw
++ */
++static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+ {
+- long limit = sysctl_sched_runtime_limit;
++ u64 vslice = __sched_period(nr_running);
+
+- /*
+- * Niced tasks have the same history dynamic range as
+- * non-niced tasks:
+- */
+- if (unlikely(se->wait_runtime > limit)) {
+- se->wait_runtime = limit;
+- schedstat_inc(se, wait_runtime_overruns);
+- schedstat_inc(cfs_rq, wait_runtime_overruns);
+- }
+- if (unlikely(se->wait_runtime < -limit)) {
+- se->wait_runtime = -limit;
+- schedstat_inc(se, wait_runtime_underruns);
+- schedstat_inc(cfs_rq, wait_runtime_underruns);
+- }
++ vslice *= NICE_0_LOAD;
++ do_div(vslice, rq_weight);
++
++ return vslice;
+ }
+
+-static inline void
+-__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
++static u64 sched_vslice(struct cfs_rq *cfs_rq)
+ {
+- se->wait_runtime += delta;
+- schedstat_add(se, sum_wait_runtime, delta);
+- limit_wait_runtime(cfs_rq, se);
++ return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+ }
+
+-static void
+-add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
++static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
+- __add_wait_runtime(cfs_rq, se, delta);
+- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
++ return __sched_vslice(cfs_rq->load.weight + se->load.weight,
++ cfs_rq->nr_running + 1);
+ }
+
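
A worked example of the period/slice arithmetic above, using the defaults this patch carries (20 ms latency, 4 ms minimum granularity, hence sched_nr_latency = 5) and ignoring the ilog(ncpus) boot-time scaling; an illustration only, not code from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long latency = 20000000ULL;	/* sysctl_sched_latency, ns */
	unsigned long nr_latency = 5;			/* sched_nr_latency */
	unsigned long nr_running = 10;
	unsigned long nice0_load = 1024;		/* NICE_0_LOAD */
	unsigned long long period, slice;

	/* p = (nr <= nl) ? l : l*nr/nl  =>  40 ms for 10 runnable tasks */
	period = nr_running <= nr_latency ?
			latency : latency * nr_running / nr_latency;

	/* s = p*w/rw: one nice-0 task among 10 equal-weight tasks gets 4 ms */
	slice = period * nice0_load / (nice0_load * nr_running);

	printf("period %llu ns, slice %llu ns\n", period, slice);
	return 0;
}

sched_vslice() expresses the same share in nice-0 time (vs = p/rw), which is the unit the vruntime bookkeeping below works in.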
+ /*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+ static inline void
+-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
++__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
++ unsigned long delta_exec)
+ {
+- unsigned long delta, delta_exec, delta_fair, delta_mine;
+- struct load_weight *lw = &cfs_rq->load;
+- unsigned long load = lw->weight;
++ unsigned long delta_exec_weighted;
++ u64 vruntime;
+
+- delta_exec = curr->delta_exec;
+ schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+
+ curr->sum_exec_runtime += delta_exec;
+- cfs_rq->exec_clock += delta_exec;
+-
+- if (unlikely(!load))
+- return;
+-
+- delta_fair = calc_delta_fair(delta_exec, lw);
+- delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
+-
+- if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
+- delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
+- delta = min(delta, (unsigned long)(
+- (long)sysctl_sched_runtime_limit - curr->wait_runtime));
+- cfs_rq->sleeper_bonus -= delta;
+- delta_mine -= delta;
++ schedstat_add(cfs_rq, exec_clock, delta_exec);
++ delta_exec_weighted = delta_exec;
++ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
++ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
++ &curr->load);
+ }
++ curr->vruntime += delta_exec_weighted;
+
+- cfs_rq->fair_clock += delta_fair;
+ /*
+- * We executed delta_exec amount of time on the CPU,
+- * but we were only entitled to delta_mine amount of
+- * time during that period (if nr_running == 1 then
+- * the two values are equal)
+- * [Note: delta_mine - delta_exec is negative]:
++ * maintain cfs_rq->min_vruntime to be a monotonic increasing
++ * value tracking the leftmost vruntime in the tree.
+ */
+- add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
++ if (first_fair(cfs_rq)) {
++ vruntime = min_vruntime(curr->vruntime,
++ __pick_next_entity(cfs_rq)->vruntime);
++ } else
++ vruntime = curr->vruntime;
++
++ cfs_rq->min_vruntime =
++ max_vruntime(cfs_rq->min_vruntime, vruntime);
+ }
+
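
The weighting step in __update_curr() above is what turns wall-clock runtime into virtual runtime: vruntime advances by roughly delta_exec * NICE_0_LOAD / weight, so heavier (lower-nice) tasks accrue it more slowly and stay further left in the tree. A rough numeric illustration, assuming the usual nice-to-weight table (nice 0 = 1024, nice +5 = 335):

#include <stdio.h>

int main(void)
{
	unsigned long long delta_exec = 1000000ULL;	/* ran for 1 ms */
	unsigned long nice0_weight = 1024;		/* NICE_0_LOAD */
	unsigned long nice5_weight = 335;		/* assumed weight of nice +5 */

	/* vruntime advance ~= delta_exec * NICE_0_LOAD / weight */
	printf("nice  0: +%llu ns\n", delta_exec * nice0_weight / nice0_weight);
	printf("nice +5: +%llu ns\n", delta_exec * nice0_weight / nice5_weight);
	return 0;
}

Competing against one nice-0 task, the nice +5 task's vruntime advances roughly three times faster, so it ends up with about a quarter of the CPU (335 / (1024 + 335)).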
+ static void update_curr(struct cfs_rq *cfs_rq)
+ {
+- struct sched_entity *curr = cfs_rq_curr(cfs_rq);
++ struct sched_entity *curr = cfs_rq->curr;
++ u64 now = rq_of(cfs_rq)->clock;
+ unsigned long delta_exec;
+
+ if (unlikely(!curr))
+ return;
+
+ /*
+ * Get the amount of time the current task was running
+ * since the last time we changed load (this cannot
+ * overflow on 32 bits):
+ */
+- delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
++ delta_exec = (unsigned long)(now - curr->exec_start);
++
++ __update_curr(cfs_rq, curr, delta_exec);
++ curr->exec_start = now;
+
+- curr->delta_exec += delta_exec;
++ if (entity_is_task(curr)) {
++ struct task_struct *curtask = task_of(curr);
+
+- if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
+- __update_curr(cfs_rq, curr);
+- curr->delta_exec = 0;
++ cpuacct_charge(curtask, delta_exec);
+ }
+- curr->exec_start = rq_of(cfs_rq)->clock;
+ }
+
+ static inline void
+ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- se->wait_start_fair = cfs_rq->fair_clock;
+ schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+ }
+
+ /*
+- * We calculate fair deltas here, so protect against the random effects
+- * of a multiplication overflow by capping it to the runtime limit:
+- */
+-#if BITS_PER_LONG == 32
+-static inline unsigned long
+-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+-{
+- u64 tmp = (u64)delta * weight >> shift;
+-
+- if (unlikely(tmp > sysctl_sched_runtime_limit*2))
+- return sysctl_sched_runtime_limit*2;
+- return tmp;
+-}
+-#else
+-static inline unsigned long
+-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+-{
+- return delta * weight >> shift;
+-}
+-#endif
+-
+-/*
+ * Task is being enqueued - update stats:
+ */
+ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- s64 key;
+-
+ /*
+ * Are we enqueueing a waiting task? (for current tasks
+ * a dequeue/enqueue event is a NOP)
+ */
+- if (se != cfs_rq_curr(cfs_rq))
++ if (se != cfs_rq->curr)
+ update_stats_wait_start(cfs_rq, se);
+- /*
+- * Update the key:
+- */
+- key = cfs_rq->fair_clock;
+-
+- /*
+- * Optimize the common nice 0 case:
+- */
+- if (likely(se->load.weight == NICE_0_LOAD)) {
+- key -= se->wait_runtime;
+- } else {
+- u64 tmp;
+-
+- if (se->wait_runtime < 0) {
+- tmp = -se->wait_runtime;
+- key += (tmp * se->load.inv_weight) >>
+- (WMULT_SHIFT - NICE_0_SHIFT);
+- } else {
+- tmp = se->wait_runtime;
+- key -= (tmp * se->load.inv_weight) >>
+- (WMULT_SHIFT - NICE_0_SHIFT);
+- }
+- }
+-
+- se->fair_key = key;
+-}
+-
+-/*
+- * Note: must be called with a freshly updated rq->fair_clock.
+- */
+-static inline void
+-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- unsigned long delta_fair = se->delta_fair_run;
+-
+- schedstat_set(se->wait_max, max(se->wait_max,
+- rq_of(cfs_rq)->clock - se->wait_start));
+-
+- if (unlikely(se->load.weight != NICE_0_LOAD))
+- delta_fair = calc_weighted(delta_fair, se->load.weight,
+- NICE_0_SHIFT);
+-
+- add_wait_runtime(cfs_rq, se, delta_fair);
+ }
+
+ static void
+ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long delta_fair;
+-
+- if (unlikely(!se->wait_start_fair))
+- return;
+-
+- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+- (u64)(cfs_rq->fair_clock - se->wait_start_fair));
+-
+- se->delta_fair_run += delta_fair;
+- if (unlikely(abs(se->delta_fair_run) >=
+- sysctl_sched_stat_granularity)) {
+- __update_stats_wait_end(cfs_rq, se);
+- se->delta_fair_run = 0;
+- }
+-
+- se->wait_start_fair = 0;
++ schedstat_set(se->wait_max, max(se->wait_max,
++ rq_of(cfs_rq)->clock - se->wait_start));
+ schedstat_set(se->wait_start, 0);
+ }
+
+ static inline void
+ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- update_curr(cfs_rq);
+ /*
+ * Mark the end of the wait period if dequeueing a
+ * waiting task:
+ */
+- if (se != cfs_rq_curr(cfs_rq))
++ if (se != cfs_rq->curr)
+ update_stats_wait_end(cfs_rq, se);
+ }
+
+ /*
+ * We are picking a new current task - update its stats:
+@@ -540,83 +407,32 @@ update_stats_curr_start(struct cfs_rq *c
+ * We are starting a new run period:
+ */
+ se->exec_start = rq_of(cfs_rq)->clock;
+ }
+
+-/*
+- * We are descheduling a task - update its stats:
+- */
+-static inline void
+-update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+-{
+- se->exec_start = 0;
+-}
+-
+ /**************************************************
+ * Scheduling class queueing methods:
+ */
+
+-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void
++account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long load = cfs_rq->load.weight, delta_fair;
+- long prev_runtime;
+-
+- /*
+- * Do not boost sleepers if there's too much bonus 'in flight'
+- * already:
+- */
+- if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+- return;
+-
+- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+- load = rq_of(cfs_rq)->cpu_load[2];
+-
+- delta_fair = se->delta_fair_sleep;
+-
+- /*
+- * Fix up delta_fair with the effect of us running
+- * during the whole sleep period:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+- delta_fair = div64_likely32((u64)delta_fair * load,
+- load + se->load.weight);
+-
+- if (unlikely(se->load.weight != NICE_0_LOAD))
+- delta_fair = calc_weighted(delta_fair, se->load.weight,
+- NICE_0_SHIFT);
+-
+- prev_runtime = se->wait_runtime;
+- __add_wait_runtime(cfs_rq, se, delta_fair);
+- delta_fair = se->wait_runtime - prev_runtime;
++ update_load_add(&cfs_rq->load, se->load.weight);
++ cfs_rq->nr_running++;
++ se->on_rq = 1;
++}
+
+- /*
+- * Track the amount of bonus we've given to sleepers:
+- */
+- cfs_rq->sleeper_bonus += delta_fair;
++static void
++account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++ update_load_sub(&cfs_rq->load, se->load.weight);
++ cfs_rq->nr_running--;
++ se->on_rq = 0;
+ }
+
+ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- struct task_struct *tsk = task_of(se);
+- unsigned long delta_fair;
+-
+- if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
+- !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+- return;
+-
+- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+- (u64)(cfs_rq->fair_clock - se->sleep_start_fair));
+-
+- se->delta_fair_sleep += delta_fair;
+- if (unlikely(abs(se->delta_fair_sleep) >=
+- sysctl_sched_stat_granularity)) {
+- __enqueue_sleeper(cfs_rq, se);
+- se->delta_fair_sleep = 0;
+- }
+-
+- se->sleep_start_fair = 0;
+-
+ #ifdef CONFIG_SCHEDSTATS
+ if (se->sleep_start) {
+ u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+
+ if ((s64)delta < 0)
+@@ -644,38 +460,99 @@ static void enqueue_sleeper(struct cfs_r
+ * Blocking time is in units of nanosecs, so shift by 20 to
+ * get a milliseconds-range estimation of the amount of
+ * time that the task spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ struct task_struct *tsk = task_of(se);
++
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
+ delta >> 20);
+ }
+ }
+ #endif
+ }
+
++static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
++#ifdef CONFIG_SCHED_DEBUG
++ s64 d = se->vruntime - cfs_rq->min_vruntime;
++
++ if (d < 0)
++ d = -d;
++
++ if (d > 3*sysctl_sched_latency)
++ schedstat_inc(cfs_rq, nr_spread_over);
++#endif
++}
++
++static void
++place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
++{
++ u64 vruntime;
++
++ vruntime = cfs_rq->min_vruntime;
++
++ if (sched_feat(TREE_AVG)) {
++ struct sched_entity *last = __pick_last_entity(cfs_rq);
++ if (last) {
++ vruntime += last->vruntime;
++ vruntime >>= 1;
++ }
++ } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
++ vruntime += sched_vslice(cfs_rq)/2;
++
++ /*
++ * The 'current' period is already promised to the current tasks,
++ * however the extra weight of the new task will slow them down a
++ * little, place the new task so that it fits in the slot that
++ * stays open at the end.
++ */
++ if (initial && sched_feat(START_DEBIT))
++ vruntime += sched_vslice_add(cfs_rq, se);
++
++ if (!initial) {
++ /* sleeps upto a single latency don't count. */
++ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
++ vruntime -= sysctl_sched_latency;
++
++ /* ensure we never gain time by being placed backwards. */
++ vruntime = max_vruntime(se->vruntime, vruntime);
++ }
++
++ se->vruntime = vruntime;
++}
++
+ static void
+ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+ {
+ /*
+- * Update the fair clock.
++ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+
+- if (wakeup)
++ if (wakeup) {
++ place_entity(cfs_rq, se, 0);
+ enqueue_sleeper(cfs_rq, se);
++ }
+
+ update_stats_enqueue(cfs_rq, se);
+- __enqueue_entity(cfs_rq, se);
++ check_spread(cfs_rq, se);
++ if (se != cfs_rq->curr)
++ __enqueue_entity(cfs_rq, se);
++ account_entity_enqueue(cfs_rq, se);
+ }
+
+ static void
+ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+ {
++ /*
++ * Update run-time statistics of the 'current'.
++ */
++ update_curr(cfs_rq);
++
+ update_stats_dequeue(cfs_rq, se);
+ if (sleep) {
+- se->sleep_start_fair = cfs_rq->fair_clock;
+ #ifdef CONFIG_SCHEDSTATS
+ if (entity_is_task(se)) {
+ struct task_struct *tsk = task_of(se);
+
+ if (tsk->state & TASK_INTERRUPTIBLE)
+@@ -683,72 +560,68 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ if (tsk->state & TASK_UNINTERRUPTIBLE)
+ se->block_start = rq_of(cfs_rq)->clock;
+ }
+ #endif
+ }
+- __dequeue_entity(cfs_rq, se);
++
++ if (se != cfs_rq->curr)
++ __dequeue_entity(cfs_rq, se);
++ account_entity_dequeue(cfs_rq, se);
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+ static void
+-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
+- struct sched_entity *curr, unsigned long granularity)
++check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ {
+- s64 __delta = curr->fair_key - se->fair_key;
+ unsigned long ideal_runtime, delta_exec;
+
+- /*
+- * ideal_runtime is compared against sum_exec_runtime, which is
+- * walltime, hence do not scale.
+- */
+- ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+- (unsigned long)sysctl_sched_min_granularity);
+-
+- /*
+- * If we executed more than what the latency constraint suggests,
+- * reduce the rescheduling granularity. This way the total latency
+- * of how much a task is not scheduled converges to
+- * sysctl_sched_latency:
+- */
++ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
+- granularity = 0;
+-
+- /*
+- * Take scheduling granularity into account - do not
+- * preempt the current task unless the best task has
+- * a larger than sched_granularity fairness advantage:
+- *
+- * scale granularity as key space is in fair_clock.
+- */
+- if (__delta > niced_granularity(curr, granularity))
+ resched_task(rq_of(cfs_rq)->curr);
+ }
+
+-static inline void
++static void
+ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- /*
+- * Any task has to be enqueued before it get to execute on
+- * a CPU. So account for the time it spent waiting on the
+- * runqueue. (note, here we rely on pick_next_task() having
+- * done a put_prev_task_fair() shortly before this, which
+- * updated rq->fair_clock - used by update_stats_wait_end())
+- */
+- update_stats_wait_end(cfs_rq, se);
++ /* 'current' is not kept within the tree. */
++ if (se->on_rq) {
++ /*
++ * Any task has to be enqueued before it get to execute on
++ * a CPU. So account for the time it spent waiting on the
++ * runqueue.
++ */
++ update_stats_wait_end(cfs_rq, se);
++ __dequeue_entity(cfs_rq, se);
++ }
++
+ update_stats_curr_start(cfs_rq, se);
+- set_cfs_rq_curr(cfs_rq, se);
++ cfs_rq->curr = se;
++#ifdef CONFIG_SCHEDSTATS
++ /*
++ * Track our maximum slice length, if the CPU's load is at
++ * least twice that of our own weight (i.e. dont track it
++ * when there are only lesser-weight tasks around):
++ */
++ if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
++ se->slice_max = max(se->slice_max,
++ se->sum_exec_runtime - se->prev_sum_exec_runtime);
++ }
++#endif
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
+ }
+
+ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+ {
+- struct sched_entity *se = __pick_next_entity(cfs_rq);
++ struct sched_entity *se = NULL;
+
+- set_next_entity(cfs_rq, se);
++ if (first_fair(cfs_rq)) {
++ se = __pick_next_entity(cfs_rq);
++ set_next_entity(cfs_rq, se);
++ }
+
+ return se;
+ }
+
+ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+@@ -758,37 +631,28 @@ static void put_prev_entity(struct cfs_r
+ * was not called and update_curr() has to be done:
+ */
+ if (prev->on_rq)
+ update_curr(cfs_rq);
+
+- update_stats_curr_end(cfs_rq, prev);
+-
+- if (prev->on_rq)
++ check_spread(cfs_rq, prev);
++ if (prev->on_rq) {
+ update_stats_wait_start(cfs_rq, prev);
+- set_cfs_rq_curr(cfs_rq, NULL);
++ /* Put 'current' back into the tree. */
++ __enqueue_entity(cfs_rq, prev);
++ }
++ cfs_rq->curr = NULL;
+ }
+
+ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ {
+- struct sched_entity *next;
+-
+- /*
+- * Dequeue and enqueue the task to update its
+- * position within the tree:
+- */
+- dequeue_entity(cfs_rq, curr, 0);
+- enqueue_entity(cfs_rq, curr, 0);
+-
+ /*
+- * Reschedule if another task tops the current one.
++ * Update run-time statistics of the 'current'.
+ */
+- next = __pick_next_entity(cfs_rq);
+- if (next == curr)
+- return;
++ update_curr(cfs_rq);
+
+- __check_preempt_curr_fair(cfs_rq, next, curr,
+- sched_granularity(cfs_rq));
++ if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
++ check_preempt_tick(cfs_rq, curr);
+ }
+
+ /**************************************************
+ * CFS operations on tasks:
+ */
+@@ -819,27 +683,32 @@ static inline struct cfs_rq *group_cfs_r
+ /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+ {
+- /* A later patch will take group into account */
+- return &cpu_rq(this_cpu)->cfs;
++ return cfs_rq->tg->cfs_rq[this_cpu];
+ }
+
+ /* Iterate thr' all leaf cfs_rq's on a runqueue */
+ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+-/* Do the two (enqueued) tasks belong to the same group ? */
+-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
++/* Do the two (enqueued) entities belong to the same group ? */
++static inline int
++is_same_group(struct sched_entity *se, struct sched_entity *pse)
+ {
+- if (curr->se.cfs_rq == p->se.cfs_rq)
++ if (se->cfs_rq == pse->cfs_rq)
+ return 1;
+
+ return 0;
+ }
+
++static inline struct sched_entity *parent_entity(struct sched_entity *se)
++{
++ return se->parent;
++}
++
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+
+ #define for_each_sched_entity(se) \
+ for (; se; se = NULL)
+
+@@ -868,15 +737,21 @@ static inline struct cfs_rq *cpu_cfs_rq(
+ }
+
+ #define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
++static inline int
++is_same_group(struct sched_entity *se, struct sched_entity *pse)
+ {
+ return 1;
+ }
+
++static inline struct sched_entity *parent_entity(struct sched_entity *se)
++{
++ return NULL;
++}
++
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ /*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+@@ -890,10 +765,11 @@ static void enqueue_task_fair(struct rq
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+ break;
+ cfs_rq = cfs_rq_of(se);
+ enqueue_entity(cfs_rq, se, wakeup);
++ wakeup = 1;
+ }
+ }
+
+ /*
+ * The dequeue_task method is called before nr_running is
+@@ -909,97 +785,95 @@ static void dequeue_task_fair(struct rq
+ cfs_rq = cfs_rq_of(se);
+ dequeue_entity(cfs_rq, se, sleep);
+ /* Don't dequeue parent if it has other entities besides us */
+ if (cfs_rq->load.weight)
+ break;
++ sleep = 1;
+ }
+ }
+
+ /*
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
+ */
+-static void yield_task_fair(struct rq *rq, struct task_struct *p)
++static void yield_task_fair(struct rq *rq)
+ {
+- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+- struct sched_entity *rightmost, *se = &p->se;
+- struct rb_node *parent;
++ struct task_struct *curr = rq->curr;
++ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
++ struct sched_entity *rightmost, *se = &curr->se;
+
+ /*
+ * Are we the only task in the tree?
+ */
+ if (unlikely(cfs_rq->nr_running == 1))
+ return;
+
+- if (likely(!sysctl_sched_compat_yield)) {
++ if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
+ __update_rq_clock(rq);
+ /*
+- * Dequeue and enqueue the task to update its
+- * position within the tree:
++ * Update run-time statistics of the 'current'.
+ */
+- dequeue_entity(cfs_rq, &p->se, 0);
+- enqueue_entity(cfs_rq, &p->se, 0);
++ update_curr(cfs_rq);
+
+ return;
+ }
+ /*
+ * Find the rightmost entry in the rbtree:
+ */
+- do {
+- parent = *link;
+- link = &parent->rb_right;
+- } while (*link);
+-
+- rightmost = rb_entry(parent, struct sched_entity, run_node);
++ rightmost = __pick_last_entity(cfs_rq);
+ /*
+ * Already in the rightmost position?
+ */
+- if (unlikely(rightmost == se))
++ if (unlikely(rightmost->vruntime < se->vruntime))
+ return;
+
+ /*
+ * Minimally necessary key value to be last in the tree:
++ * Upon rescheduling, sched_class::put_prev_task() will place
++ * 'current' within the tree based on its new key value.
+ */
+- se->fair_key = rightmost->fair_key + 1;
+-
+- if (cfs_rq->rb_leftmost == &se->run_node)
+- cfs_rq->rb_leftmost = rb_next(&se->run_node);
+- /*
+- * Relink the task to the rightmost position:
+- */
+- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+- rb_link_node(&se->run_node, parent, link);
+- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
++ se->vruntime = rightmost->vruntime + 1;
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
++static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
+ {
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
++ struct sched_entity *se = &curr->se, *pse = &p->se;
+ unsigned long gran;
+
+ if (unlikely(rt_prio(p->prio))) {
+ update_rq_clock(rq);
+ update_curr(cfs_rq);
+ resched_task(curr);
+ return;
+ }
+-
+- gran = sysctl_sched_wakeup_granularity;
+ /*
+- * Batch tasks prefer throughput over latency:
++ * Batch tasks do not preempt (their preemption is driven by
++ * the tick):
+ */
+ if (unlikely(p->policy == SCHED_BATCH))
+- gran = sysctl_sched_batch_wakeup_granularity;
++ return;
++
++ if (!sched_feat(WAKEUP_PREEMPT))
++ return;
++
++ while (!is_same_group(se, pse)) {
++ se = parent_entity(se);
++ pse = parent_entity(pse);
++ }
+
+- if (is_same_group(curr, p))
+- __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
++ gran = sysctl_sched_wakeup_granularity;
++ if (unlikely(se->load.weight != NICE_0_LOAD))
++ gran = calc_delta_fair(gran, &se->load);
++
++ if (pse->vruntime + gran < se->vruntime)
++ resched_task(curr);
+ }
+
+ static struct task_struct *pick_next_task_fair(struct rq *rq)
+ {
+ struct cfs_rq *cfs_rq = &rq->cfs;
+@@ -1028,10 +902,11 @@ static void put_prev_task_fair(struct rq
+ cfs_rq = cfs_rq_of(se);
+ put_prev_entity(cfs_rq, se);
+ }
+ }
+
++#ifdef CONFIG_SMP
+ /**************************************************
+ * Fair scheduling class load-balancing methods:
+ */
+
+ /*
+@@ -1039,11 +914,11 @@ static void put_prev_task_fair(struct rq
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+-static inline struct task_struct *
++static struct task_struct *
+ __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+ {
+ struct task_struct *p;
+
+ if (!curr)
+@@ -1076,25 +951,27 @@ static int cfs_rq_best_prio(struct cfs_r
+ struct task_struct *p;
+
+ if (!cfs_rq->nr_running)
+ return MAX_PRIO;
+
+- curr = __pick_next_entity(cfs_rq);
++ curr = cfs_rq->curr;
++ if (!curr)
++ curr = __pick_next_entity(cfs_rq);
++
+ p = task_of(curr);
+
+ return p->prio;
+ }
+ #endif
+
+ static unsigned long
+ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
++ unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio)
+ {
+ struct cfs_rq *busy_cfs_rq;
+- unsigned long load_moved, total_nr_moved = 0, nr_moved;
+ long rem_load_move = max_load_move;
+ struct rq_iterator cfs_rq_iterator;
+
+ cfs_rq_iterator.start = load_balance_start_fair;
+ cfs_rq_iterator.next = load_balance_next_fair;
+@@ -1118,29 +995,52 @@ load_balance_fair(struct rq *this_rq, in
+
+ *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+ #else
+ # define maxload rem_load_move
+ #endif
+- /* pass busy_cfs_rq argument into
++ /*
++ * pass busy_cfs_rq argument into
+ * load_balance_[start|next]_fair iterators
+ */
+ cfs_rq_iterator.arg = busy_cfs_rq;
+- nr_moved = balance_tasks(this_rq, this_cpu, busiest,
+- max_nr_move, maxload, sd, idle, all_pinned,
+- &load_moved, this_best_prio, &cfs_rq_iterator);
+-
+- total_nr_moved += nr_moved;
+- max_nr_move -= nr_moved;
+- rem_load_move -= load_moved;
++ rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
++ maxload, sd, idle, all_pinned,
++ this_best_prio,
++ &cfs_rq_iterator);
+
+- if (max_nr_move <= 0 || rem_load_move <= 0)
++ if (rem_load_move <= 0)
+ break;
+ }
+
+ return max_load_move - rem_load_move;
+ }
+
++static int
++move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ struct cfs_rq *busy_cfs_rq;
++ struct rq_iterator cfs_rq_iterator;
++
++ cfs_rq_iterator.start = load_balance_start_fair;
++ cfs_rq_iterator.next = load_balance_next_fair;
++
++ for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
++ /*
++ * pass busy_cfs_rq argument into
++ * load_balance_[start|next]_fair iterators
++ */
++ cfs_rq_iterator.arg = busy_cfs_rq;
++ if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
++ &cfs_rq_iterator))
++ return 1;
++ }
++
++ return 0;
++}
++#endif
++
+ /*
+ * scheduler tick hitting a task of our scheduling class:
+ */
+ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+ {
+@@ -1151,51 +1051,44 @@ static void task_tick_fair(struct rq *rq
+ cfs_rq = cfs_rq_of(se);
+ entity_tick(cfs_rq, se);
+ }
+ }
+
++#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
++
+ /*
+ * Share the fairness runtime between parent and child, thus the
+ * total amount of pressure for CPU stays equal - new tasks
+ * get a chance to run but frequent forkers are not allowed to
+ * monopolize the CPU. Note: the parent runqueue is locked,
+ * the child is not running yet.
+ */
+ static void task_new_fair(struct rq *rq, struct task_struct *p)
+ {
+ struct cfs_rq *cfs_rq = task_cfs_rq(p);
+- struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
++ struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
++ int this_cpu = smp_processor_id();
+
+ sched_info_queued(p);
+
+ update_curr(cfs_rq);
+- update_stats_enqueue(cfs_rq, se);
+- /*
+- * Child runs first: we let it run before the parent
+- * until it reschedules once. We set up the key so that
+- * it will preempt the parent:
+- */
+- se->fair_key = curr->fair_key -
+- niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
+- /*
+- * The first wait is dominated by the child-runs-first logic,
+- * so do not credit it with that waiting time yet:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
+- se->wait_start_fair = 0;
++ place_entity(cfs_rq, se, 1);
+
+- /*
+- * The statistical average of wait_runtime is about
+- * -granularity/2, so initialize the task with that:
+- */
+- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+- se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
++ /* 'curr' will be NULL if the child belongs to a different group */
++ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
++ curr && curr->vruntime < se->vruntime) {
++ /*
++ * Upon rescheduling, sched_class::put_prev_task() will place
++ * 'current' within the tree based on its new key value.
++ */
++ swap(curr->vruntime, se->vruntime);
++ }
+
+- __enqueue_entity(cfs_rq, se);
++ enqueue_task_fair(rq, p, 0);
++ resched_task(rq->curr);
+ }
+
+-#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+@@ -1204,30 +1097,29 @@ static void set_curr_task_fair(struct rq
+ struct sched_entity *se = &rq->curr->se;
+
+ for_each_sched_entity(se)
+ set_next_entity(cfs_rq_of(se), se);
+ }
+-#else
+-static void set_curr_task_fair(struct rq *rq)
+-{
+-}
+-#endif
+
+ /*
+ * All the scheduling class methods:
+ */
+-struct sched_class fair_sched_class __read_mostly = {
++static const struct sched_class fair_sched_class = {
++ .next = &idle_sched_class,
+ .enqueue_task = enqueue_task_fair,
+ .dequeue_task = dequeue_task_fair,
+ .yield_task = yield_task_fair,
+
+- .check_preempt_curr = check_preempt_curr_fair,
++ .check_preempt_curr = check_preempt_wakeup,
+
+ .pick_next_task = pick_next_task_fair,
+ .put_prev_task = put_prev_task_fair,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_fair,
++ .move_one_task = move_one_task_fair,
++#endif
+
+ .set_curr_task = set_curr_task_fair,
+ .task_tick = task_tick_fair,
+ .task_new = task_new_fair,
+ };
+@@ -1235,9 +1127,12 @@ struct sched_class fair_sched_class __re
+ #ifdef CONFIG_SCHED_DEBUG
+ static void print_cfs_stats(struct seq_file *m, int cpu)
+ {
+ struct cfs_rq *cfs_rq;
+
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
++#endif
+ for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+ print_cfs_rq(m, cpu, cfs_rq);
+ }
+ #endif
+--- linux-2.6.23.orig/kernel/sched_idletask.c
++++ linux-2.6.23/kernel/sched_idletask.c
+@@ -35,37 +35,55 @@ dequeue_task_idle(struct rq *rq, struct
+
+ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+ {
+ }
+
++#ifdef CONFIG_SMP
+ static unsigned long
+ load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, int *this_best_prio)
++ unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned, int *this_best_prio)
+ {
+ return 0;
+ }
+
++static int
++move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ return 0;
++}
++#endif
++
+ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+ {
+ }
+
++static void set_curr_task_idle(struct rq *rq)
++{
++}
++
+ /*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+-static struct sched_class idle_sched_class __read_mostly = {
++const struct sched_class idle_sched_class = {
++ /* .next is NULL */
+ /* no enqueue/yield_task for idle tasks */
+
+ /* dequeue is not valid, we print a debug message there: */
+ .dequeue_task = dequeue_task_idle,
+
+ .check_preempt_curr = check_preempt_curr_idle,
+
+ .pick_next_task = pick_next_task_idle,
+ .put_prev_task = put_prev_task_idle,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_idle,
++ .move_one_task = move_one_task_idle,
++#endif
+
++ .set_curr_task = set_curr_task_idle,
+ .task_tick = task_tick_idle,
+ /* no .task_new for idle tasks */
+ };
+--- linux-2.6.23.orig/kernel/sched_rt.c
++++ linux-2.6.23/kernel/sched_rt.c
+@@ -5,11 +5,11 @@
+
+ /*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+-static inline void update_curr_rt(struct rq *rq)
++static void update_curr_rt(struct rq *rq)
+ {
+ struct task_struct *curr = rq->curr;
+ u64 delta_exec;
+
+ if (!task_has_rt_policy(curr))
+@@ -21,10 +21,11 @@ static inline void update_curr_rt(struct
+
+ schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
+
+ curr->se.sum_exec_runtime += delta_exec;
+ curr->se.exec_start = rq->clock;
++ cpuacct_charge(curr, delta_exec);
+ }
+
+ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+ {
+ struct rt_prio_array *array = &rq->rt.active;
+@@ -57,13 +58,13 @@ static void requeue_task_rt(struct rq *r
+
+ list_move_tail(&p->run_list, array->queue + p->prio);
+ }
+
+ static void
+-yield_task_rt(struct rq *rq, struct task_struct *p)
++yield_task_rt(struct rq *rq)
+ {
+- requeue_task_rt(rq, p);
++ requeue_task_rt(rq, rq->curr);
+ }
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+@@ -96,10 +97,11 @@ static void put_prev_task_rt(struct rq *
+ {
+ update_curr_rt(rq);
+ p->se.exec_start = 0;
+ }
+
++#ifdef CONFIG_SMP
+ /*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+@@ -170,45 +172,57 @@ static struct task_struct *load_balance_
+ return p;
+ }
+
+ static unsigned long
+ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+- unsigned long max_nr_move, unsigned long max_load_move,
+- struct sched_domain *sd, enum cpu_idle_type idle,
+- int *all_pinned, int *this_best_prio)
++ unsigned long max_load_move,
++ struct sched_domain *sd, enum cpu_idle_type idle,
++ int *all_pinned, int *this_best_prio)
+ {
+- int nr_moved;
+ struct rq_iterator rt_rq_iterator;
+- unsigned long load_moved;
+
+ rt_rq_iterator.start = load_balance_start_rt;
+ rt_rq_iterator.next = load_balance_next_rt;
+ /* pass 'busiest' rq argument into
+ * load_balance_[start|next]_rt iterators
+ */
+ rt_rq_iterator.arg = busiest;
+
+- nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
+- max_load_move, sd, idle, all_pinned, &load_moved,
+- this_best_prio, &rt_rq_iterator);
++ return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
++ idle, all_pinned, this_best_prio, &rt_rq_iterator);
++}
++
++static int
++move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ struct sched_domain *sd, enum cpu_idle_type idle)
++{
++ struct rq_iterator rt_rq_iterator;
++
++ rt_rq_iterator.start = load_balance_start_rt;
++ rt_rq_iterator.next = load_balance_next_rt;
++ rt_rq_iterator.arg = busiest;
+
+- return load_moved;
++ return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
++ &rt_rq_iterator);
+ }
++#endif
+
+ static void task_tick_rt(struct rq *rq, struct task_struct *p)
+ {
++ update_curr_rt(rq);
++
+ /*
+ * RR tasks need a special form of timeslice management.
+ * FIFO tasks have no timeslices.
+ */
+ if (p->policy != SCHED_RR)
+ return;
+
+ if (--p->time_slice)
+ return;
+
+- p->time_slice = static_prio_timeslice(p->static_prio);
++ p->time_slice = DEF_TIMESLICE;
+
+ /*
+ * Requeue to the end of queue if we are not the only element
+ * on the queue:
+ */
+@@ -216,19 +230,31 @@ static void task_tick_rt(struct rq *rq,
+ requeue_task_rt(rq, p);
+ set_tsk_need_resched(p);
+ }
+ }
+
+-static struct sched_class rt_sched_class __read_mostly = {
++static void set_curr_task_rt(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ p->se.exec_start = rq->clock;
++}
++
++const struct sched_class rt_sched_class = {
++ .next = &fair_sched_class,
+ .enqueue_task = enqueue_task_rt,
+ .dequeue_task = dequeue_task_rt,
+ .yield_task = yield_task_rt,
+
+ .check_preempt_curr = check_preempt_curr_rt,
+
+ .pick_next_task = pick_next_task_rt,
+ .put_prev_task = put_prev_task_rt,
+
++#ifdef CONFIG_SMP
+ .load_balance = load_balance_rt,
++ .move_one_task = move_one_task_rt,
++#endif
+
++ .set_curr_task = set_curr_task_rt,
+ .task_tick = task_tick_rt,
+ };
+--- linux-2.6.23.orig/kernel/sched_stats.h
++++ linux-2.6.23/kernel/sched_stats.h
+@@ -14,22 +14,22 @@ static int show_schedstat(struct seq_fil
+ seq_printf(seq, "timestamp %lu\n", jiffies);
+ for_each_online_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ #ifdef CONFIG_SMP
+ struct sched_domain *sd;
+- int dcnt = 0;
++ int dcount = 0;
+ #endif
+
+ /* runqueue-specific stats */
+ seq_printf(seq,
+- "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
++ "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
+ cpu, rq->yld_both_empty,
+- rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
+- rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
+- rq->ttwu_cnt, rq->ttwu_local,
++ rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
++ rq->sched_switch, rq->sched_count, rq->sched_goidle,
++ rq->ttwu_count, rq->ttwu_local,
+ rq->rq_sched_info.cpu_time,
+- rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
++ rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+
+ seq_printf(seq, "\n");
+
+ #ifdef CONFIG_SMP
+ /* domain-specific stats */
+@@ -37,29 +37,28 @@ static int show_schedstat(struct seq_fil
+ for_each_domain(cpu, sd) {
+ enum cpu_idle_type itype;
+ char mask_str[NR_CPUS];
+
+ cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+- seq_printf(seq, "domain%d %s", dcnt++, mask_str);
++ seq_printf(seq, "domain%d %s", dcount++, mask_str);
+ for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+ itype++) {
+- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+- "%lu",
+- sd->lb_cnt[itype],
++ seq_printf(seq, " %u %u %u %u %u %u %u %u",
++ sd->lb_count[itype],
+ sd->lb_balanced[itype],
+ sd->lb_failed[itype],
+ sd->lb_imbalance[itype],
+ sd->lb_gained[itype],
+ sd->lb_hot_gained[itype],
+ sd->lb_nobusyq[itype],
+ sd->lb_nobusyg[itype]);
+ }
+- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+- " %lu %lu %lu\n",
+- sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
+- sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+- sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
++ seq_printf(seq,
++ " %u %u %u %u %u %u %u %u %u %u %u %u\n",
++ sd->alb_count, sd->alb_failed, sd->alb_pushed,
++ sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
++ sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
+ sd->ttwu_wake_remote, sd->ttwu_move_affine,
+ sd->ttwu_move_balance);
+ }
+ preempt_enable();
+ #endif
+@@ -99,11 +98,11 @@ const struct file_operations proc_scheds
+ static inline void
+ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+ {
+ if (rq) {
+ rq->rq_sched_info.run_delay += delta;
+- rq->rq_sched_info.pcnt++;
++ rq->rq_sched_info.pcount++;
+ }
+ }
+
+ /*
+ * Expects runqueue lock to be held for atomicity of update
+@@ -155,18 +154,18 @@ static inline void sched_info_dequeued(s
+ * long it was waiting to run. We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+ static void sched_info_arrive(struct task_struct *t)
+ {
+- unsigned long long now = sched_clock(), delta = 0;
++ unsigned long long now = task_rq(t)->clock, delta = 0;
+
+ if (t->sched_info.last_queued)
+ delta = now - t->sched_info.last_queued;
+ sched_info_dequeued(t);
+ t->sched_info.run_delay += delta;
+ t->sched_info.last_arrival = now;
+- t->sched_info.pcnt++;
++ t->sched_info.pcount++;
+
+ rq_sched_info_arrive(task_rq(t), delta);
+ }
+
+ /*
+@@ -186,20 +185,21 @@ static void sched_info_arrive(struct tas
+ */
+ static inline void sched_info_queued(struct task_struct *t)
+ {
+ if (unlikely(sched_info_on()))
+ if (!t->sched_info.last_queued)
+- t->sched_info.last_queued = sched_clock();
++ t->sched_info.last_queued = task_rq(t)->clock;
+ }
+
+ /*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily. Now we can calculate how long we ran.
+ */
+ static inline void sched_info_depart(struct task_struct *t)
+ {
+- unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
++ unsigned long long delta = task_rq(t)->clock -
++ t->sched_info.last_arrival;
+
+ t->sched_info.cpu_time += delta;
+ rq_sched_info_depart(task_rq(t), delta);
+ }
+
+--- linux-2.6.23.orig/kernel/sysctl.c
++++ linux-2.6.23/kernel/sysctl.c
+@@ -211,35 +211,35 @@ static ctl_table root_table[] = {
+ { .ctl_name = 0 }
+ };
+
+ #ifdef CONFIG_SCHED_DEBUG
+ static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
+-static unsigned long max_sched_granularity_ns = 1000000000; /* 1 second */
++static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
+-static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
++static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+ #endif
+
+-static ctl_table kern_table[] = {
++static struct ctl_table kern_table[] = {
+ #ifdef CONFIG_SCHED_DEBUG
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_min_granularity_ns",
+ .data = &sysctl_sched_min_granularity,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &sched_nr_latency_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_sched_granularity_ns,
+ .extra2 = &max_sched_granularity_ns,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_latency_ns",
+ .data = &sysctl_sched_latency,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &sched_nr_latency_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_sched_granularity_ns,
+ .extra2 = &max_sched_granularity_ns,
+ },
+ {
+@@ -264,47 +264,43 @@ static ctl_table kern_table[] = {
+ .extra1 = &min_wakeup_granularity_ns,
+ .extra2 = &max_wakeup_granularity_ns,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_stat_granularity_ns",
+- .data = &sysctl_sched_stat_granularity,
++ .procname = "sched_child_runs_first",
++ .data = &sysctl_sched_child_runs_first,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
+- .strategy = &sysctl_intvec,
+- .extra1 = &min_wakeup_granularity_ns,
+- .extra2 = &max_wakeup_granularity_ns,
++ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_runtime_limit_ns",
+- .data = &sysctl_sched_runtime_limit,
++ .procname = "sched_features",
++ .data = &sysctl_sched_features,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = &proc_dointvec_minmax,
+- .strategy = &sysctl_intvec,
+- .extra1 = &min_sched_granularity_ns,
+- .extra2 = &max_sched_granularity_ns,
++ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_child_runs_first",
+- .data = &sysctl_sched_child_runs_first,
++ .procname = "sched_migration_cost",
++ .data = &sysctl_sched_migration_cost,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+ {
+ .ctl_name = CTL_UNNUMBERED,
+- .procname = "sched_features",
+- .data = &sysctl_sched_features,
++ .procname = "sched_nr_migrate",
++ .data = &sysctl_sched_nr_migrate,
+ .maxlen = sizeof(unsigned int),
+- .mode = 0644,
++ .mode = 644,
+ .proc_handler = &proc_dointvec,
+ },
+ #endif
++#endif
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_compat_yield",
+ .data = &sysctl_sched_compat_yield,
+ .maxlen = sizeof(unsigned int),
+--- linux-2.6.23.orig/kernel/timer.c
++++ linux-2.6.23/kernel/timer.c
+@@ -824,14 +824,17 @@ void update_process_times(int user_tick)
+ {
+ struct task_struct *p = current;
+ int cpu = smp_processor_id();
+
+ /* Note: this timer irq context must be accounted for as well. */
+- if (user_tick)
++ if (user_tick) {
+ account_user_time(p, jiffies_to_cputime(1));
+- else
++ account_user_time_scaled(p, jiffies_to_cputime(1));
++ } else {
+ account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
++ account_system_time_scaled(p, jiffies_to_cputime(1));
++ }
+ run_local_timers();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_tick);
+ scheduler_tick();
+ run_posix_cpu_timers(p);
+--- linux-2.6.23.orig/kernel/tsacct.c
++++ linux-2.6.23/kernel/tsacct.c
+@@ -60,10 +60,14 @@ void bacct_add_tsk(struct taskstats *sta
+ stats->ac_ppid = pid_alive(tsk) ?
+ rcu_dereference(tsk->real_parent)->tgid : 0;
+ rcu_read_unlock();
+ stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
+ stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
++ stats->ac_utimescaled =
++ cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
++ stats->ac_stimescaled =
++ cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
+ stats->ac_minflt = tsk->min_flt;
+ stats->ac_majflt = tsk->maj_flt;
+
+ strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
+ }
+--- linux-2.6.23.orig/kernel/user.c
++++ linux-2.6.23/kernel/user.c
+@@ -48,40 +48,242 @@ struct user_struct root_user = {
+ .locked_shm = 0,
+ #ifdef CONFIG_KEYS
+ .uid_keyring = &root_user_keyring,
+ .session_keyring = &root_session_keyring,
+ #endif
++#ifdef CONFIG_FAIR_USER_SCHED
++ .tg = &init_task_group,
++#endif
+ };
+
+ /*
+ * These routines must be called with the uidhash spinlock held!
+ */
+-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
++static inline void uid_hash_insert(struct user_struct *up,
++ struct hlist_head *hashent)
+ {
+ hlist_add_head(&up->uidhash_node, hashent);
+ }
+
+ static inline void uid_hash_remove(struct user_struct *up)
+ {
+ hlist_del_init(&up->uidhash_node);
+ }
+
+-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
++static inline struct user_struct *uid_hash_find(uid_t uid,
++ struct hlist_head *hashent)
+ {
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+- if(user->uid == uid) {
++ if (user->uid == uid) {
+ atomic_inc(&user->__count);
+ return user;
+ }
+ }
+
+ return NULL;
+ }
+
++#ifdef CONFIG_FAIR_USER_SCHED
++
++static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
++static DEFINE_MUTEX(uids_mutex);
++
++static void sched_destroy_user(struct user_struct *up)
++{
++ sched_destroy_group(up->tg);
++}
++
++static int sched_create_user(struct user_struct *up)
++{
++ int rc = 0;
++
++ up->tg = sched_create_group();
++ if (IS_ERR(up->tg))
++ rc = -ENOMEM;
++
++ return rc;
++}
++
++static void sched_switch_user(struct task_struct *p)
++{
++ sched_move_task(p);
++}
++
++static inline void uids_mutex_lock(void)
++{
++ mutex_lock(&uids_mutex);
++}
++
++static inline void uids_mutex_unlock(void)
++{
++ mutex_unlock(&uids_mutex);
++}
++
++/* return cpu shares held by the user */
++ssize_t cpu_shares_show(struct kset *kset, char *buffer)
++{
++ struct user_struct *up = container_of(kset, struct user_struct, kset);
++
++ return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
++}
++
++/* modify cpu shares held by the user */
++ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
++{
++ struct user_struct *up = container_of(kset, struct user_struct, kset);
++ unsigned long shares;
++ int rc;
++
++ sscanf(buffer, "%lu", &shares);
++
++ rc = sched_group_set_shares(up->tg, shares);
++
++ return (rc ? rc : size);
++}
++
++static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
++{
++ sa->attr.name = name; sa->attr.owner = NULL;
++ sa->attr.mode = mode;
++ sa->show = cpu_shares_show;
++ sa->store = cpu_shares_store;
++}
++
++/* Create "/sys/kernel/uids/<uid>" directory and
++ * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
++ */
++static int user_kobject_create(struct user_struct *up)
++{
++ struct kset *kset = &up->kset;
++ struct kobject *kobj = &kset->kobj;
++ int error;
++
++ memset(kset, 0, sizeof(struct kset));
++ kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
++ kobject_set_name(kobj, "%d", up->uid);
++ kset_init(kset);
++ user_attr_init(&up->user_attr, "cpu_share", 0644);
++
++ error = kobject_add(kobj);
++ if (error)
++ goto done;
++
++ error = sysfs_create_file(kobj, &up->user_attr.attr);
++ if (error)
++ kobject_del(kobj);
++
++ kobject_uevent(kobj, KOBJ_ADD);
++
++done:
++ return error;
++}
++
++/* create these in sysfs filesystem:
++ * "/sys/kernel/uids" directory
++ * "/sys/kernel/uids/0" directory (for root user)
++ * "/sys/kernel/uids/0/cpu_share" file (for root user)
++ */
++int __init uids_kobject_init(void)
++{
++ int error;
++
++ /* create under /sys/kernel dir */
++ uids_kobject.parent = &kernel_subsys.kobj;
++ uids_kobject.kset = &kernel_subsys;
++ kobject_set_name(&uids_kobject, "uids");
++ kobject_init(&uids_kobject);
++
++ error = kobject_add(&uids_kobject);
++ if (!error)
++ error = user_kobject_create(&root_user);
++
++ return error;
++}
++
++/* work function to remove sysfs directory for a user and free up
++ * corresponding structures.
++ */
++static void remove_user_sysfs_dir(struct work_struct *w)
++{
++ struct user_struct *up = container_of(w, struct user_struct, work);
++ struct kobject *kobj = &up->kset.kobj;
++ unsigned long flags;
++ int remove_user = 0;
++
++ /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
++ * atomic.
++ */
++ uids_mutex_lock();
++
++ local_irq_save(flags);
++
++ if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
++ uid_hash_remove(up);
++ remove_user = 1;
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++ } else {
++ local_irq_restore(flags);
++ }
++
++ if (!remove_user)
++ goto done;
++
++ sysfs_remove_file(kobj, &up->user_attr.attr);
++ kobject_uevent(kobj, KOBJ_REMOVE);
++ kobject_del(kobj);
++
++ sched_destroy_user(up);
++ key_put(up->uid_keyring);
++ key_put(up->session_keyring);
++ kmem_cache_free(uid_cachep, up);
++
++done:
++ uids_mutex_unlock();
++}
++
++/* IRQs are disabled and uidhash_lock is held upon function entry.
++ * IRQ state (as stored in flags) is restored and uidhash_lock released
++ * upon function exit.
++ */
++static inline void free_user(struct user_struct *up, unsigned long flags)
++{
++ /* restore back the count */
++ atomic_inc(&up->__count);
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++
++ INIT_WORK(&up->work, remove_user_sysfs_dir);
++ schedule_work(&up->work);
++}
++
++#else /* CONFIG_FAIR_USER_SCHED */
++
++static void sched_destroy_user(struct user_struct *up) { }
++static int sched_create_user(struct user_struct *up) { return 0; }
++static void sched_switch_user(struct task_struct *p) { }
++static inline int user_kobject_create(struct user_struct *up) { return 0; }
++static inline void uids_mutex_lock(void) { }
++static inline void uids_mutex_unlock(void) { }
++
++/* IRQs are disabled and uidhash_lock is held upon function entry.
++ * IRQ state (as stored in flags) is restored and uidhash_lock released
++ * upon function exit.
++ */
++static inline void free_user(struct user_struct *up, unsigned long flags)
++{
++ uid_hash_remove(up);
++ spin_unlock_irqrestore(&uidhash_lock, flags);
++ sched_destroy_user(up);
++ key_put(up->uid_keyring);
++ key_put(up->session_keyring);
++ kmem_cache_free(uid_cachep, up);
++}
++
++#endif /* CONFIG_FAIR_USER_SCHED */
++
+ /*
+ * Locate the user_struct for the passed UID. If found, take a ref on it. The
+ * caller must undo that ref with free_uid().
+ *
+ * If the user_struct could not be found, return NULL.
+@@ -104,26 +306,26 @@ void free_uid(struct user_struct *up)
+
+ if (!up)
+ return;
+
+ local_irq_save(flags);
+- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+- uid_hash_remove(up);
+- spin_unlock_irqrestore(&uidhash_lock, flags);
+- key_put(up->uid_keyring);
+- key_put(up->session_keyring);
+- kmem_cache_free(uid_cachep, up);
+- } else {
++ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
++ free_user(up, flags);
++ else
+ local_irq_restore(flags);
+- }
+ }
+
+ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+ {
+ struct hlist_head *hashent = uidhashentry(ns, uid);
+ struct user_struct *up;
+
++ /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
++ * atomic.
++ */
++ uids_mutex_lock();
++
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ spin_unlock_irq(&uidhash_lock);
+
+ if (!up) {
+@@ -148,27 +350,51 @@ struct user_struct * alloc_uid(struct us
+ if (alloc_uid_keyring(new, current) < 0) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
+
++ if (sched_create_user(new) < 0) {
++ key_put(new->uid_keyring);
++ key_put(new->session_keyring);
++ kmem_cache_free(uid_cachep, new);
++ return NULL;
++ }
++
++ if (user_kobject_create(new)) {
++ sched_destroy_user(new);
++ key_put(new->uid_keyring);
++ key_put(new->session_keyring);
++ kmem_cache_free(uid_cachep, new);
++ uids_mutex_unlock();
++ return NULL;
++ }
++
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ if (up) {
++ /* This case is not possible when CONFIG_FAIR_USER_SCHED
++ * is defined, since we serialize alloc_uid() using
++ * uids_mutex. Hence no need to call
++ * sched_destroy_user() or remove_user_sysfs_dir().
++ */
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock_irq(&uidhash_lock);
+
+ }
++
++ uids_mutex_unlock();
++
+ return up;
+ }
+
+ void switch_uid(struct user_struct *new_user)
+ {
+@@ -182,10 +408,11 @@ void switch_uid(struct user_struct *new_
+ old_user = current->user;
+ atomic_inc(&new_user->processes);
+ atomic_dec(&old_user->processes);
+ switch_uid_keyring(new_user);
+ current->user = new_user;
++ sched_switch_user(current);
+
+ /*
+ * We need to synchronize with __sigqueue_alloc()
+ * doing a get_uid(p->user).. If that saw the old
+ * user value, we need to wait until it has exited
+--- linux-2.6.23.orig/mm/memory_hotplug.c
++++ linux-2.6.23/mm/memory_hotplug.c
+@@ -215,10 +215,14 @@ int online_pages(unsigned long pfn, unsi
+ }
+ zone->present_pages += onlined_pages;
+ zone->zone_pgdat->node_present_pages += onlined_pages;
+
+ setup_per_zone_pages_min();
++ if (onlined_pages) {
++ kswapd_run(zone_to_nid(zone));
++ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++ }
+
+ if (need_zonelists_rebuild)
+ build_all_zonelists();
+ vm_total_pages = nr_free_pagecache_pages();
+ writeback_set_ratelimit();
+@@ -269,13 +273,10 @@ int add_memory(int nid, u64 start, u64 s
+ if (!node_online(nid)) {
+ pgdat = hotadd_new_pgdat(nid, start);
+ if (!pgdat)
+ return -ENOMEM;
+ new_pgdat = 1;
+- ret = kswapd_run(nid);
+- if (ret)
+- goto error;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, start, size);
+
+--- linux-2.6.23.orig/mm/page_alloc.c
++++ linux-2.6.23/mm/page_alloc.c
+@@ -45,17 +45,25 @@
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+ #include "internal.h"
+
+ /*
+- * MCD - HACK: Find somewhere to initialize this EARLY, or make this
+- * initializer cleaner
++ * Array of node states.
+ */
+-nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
+-EXPORT_SYMBOL(node_online_map);
+-nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
+-EXPORT_SYMBOL(node_possible_map);
++nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
++ [N_POSSIBLE] = NODE_MASK_ALL,
++ [N_ONLINE] = { { [0] = 1UL } },
++#ifndef CONFIG_NUMA
++ [N_NORMAL_MEMORY] = { { [0] = 1UL } },
++#ifdef CONFIG_HIGHMEM
++ [N_HIGH_MEMORY] = { { [0] = 1UL } },
++#endif
++ [N_CPU] = { { [0] = 1UL } },
++#endif /* NUMA */
++};
++EXPORT_SYMBOL(node_states);
++
+ unsigned long totalram_pages __read_mostly;
+ unsigned long totalreserve_pages __read_mostly;
+ long nr_swap_pages;
+ int percpu_pagelist_fraction;
+
+@@ -2070,18 +2078,39 @@ static void build_zonelist_cache(pg_data
+ pgdat->node_zonelists[i].zlcache_ptr = NULL;
+ }
+
+ #endif /* CONFIG_NUMA */
+
++/* Any regular memory on that node ? */
++static void check_for_regular_memory(pg_data_t *pgdat)
++{
++#ifdef CONFIG_HIGHMEM
++ enum zone_type zone_type;
++
++ for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
++ struct zone *zone = &pgdat->node_zones[zone_type];
++ if (zone->present_pages)
++ node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
++ }
++#endif
++}
++
+ /* return values int ....just for stop_machine_run() */
+ static int __build_all_zonelists(void *dummy)
+ {
+ int nid;
+
+ for_each_online_node(nid) {
+- build_zonelists(NODE_DATA(nid));
+- build_zonelist_cache(NODE_DATA(nid));
++ pg_data_t *pgdat = NODE_DATA(nid);
++
++ build_zonelists(pgdat);
++ build_zonelist_cache(pgdat);
++
++ /* Any memory on that node */
++ if (pgdat->node_present_pages)
++ node_set_state(nid, N_HIGH_MEMORY);
++ check_for_regular_memory(pgdat);
+ }
+ return 0;
+ }
+
+ void build_all_zonelists(void)
+@@ -2322,18 +2351,21 @@ static struct per_cpu_pageset boot_pages
+ * per cpu pageset array in struct zone.
+ */
+ static int __cpuinit process_zones(int cpu)
+ {
+ struct zone *zone, *dzone;
++ int node = cpu_to_node(cpu);
++
++ node_set_state(node, N_CPU); /* this node has a cpu */
+
+ for_each_zone(zone) {
+
+ if (!populated_zone(zone))
+ continue;
+
+ zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
+- GFP_KERNEL, cpu_to_node(cpu));
++ GFP_KERNEL, node);
+ if (!zone_pcp(zone, cpu))
+ goto bad;
+
+ setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+
+--- linux-2.6.23.orig/mm/vmscan.c
++++ linux-2.6.23/mm/vmscan.c
+@@ -1845,11 +1845,10 @@ static int __zone_reclaim(struct zone *z
+ return nr_reclaimed >= nr_pages;
+ }
+
+ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+ {
+- cpumask_t mask;
+ int node_id;
+
+ /*
+ * Zone reclaim reclaims unmapped file backed pages and
+ * slab pages if we are over the defined limits.
+@@ -1882,11 +1881,10 @@ int zone_reclaim(struct zone *zone, gfp_
+ * have associated processors. This will favor the local processor
+ * over remote processors and spread off node memory allocations
+ * as wide as possible.
+ */
+ node_id = zone_to_nid(zone);
+- mask = node_to_cpumask(node_id);
+- if (!cpus_empty(mask) && node_id != numa_node_id())
++ if (node_state(node_id, N_CPU) && node_id != numa_node_id())
+ return 0;
+ return __zone_reclaim(zone, gfp_mask, order);
+ }
+ #endif
+--- linux-2.6.23.orig/net/unix/af_unix.c
++++ linux-2.6.23/net/unix/af_unix.c
+@@ -331,11 +331,11 @@ static inline int unix_writable(struct s
+ static void unix_write_space(struct sock *sk)
+ {
+ read_lock(&sk->sk_callback_lock);
+ if (unix_writable(sk)) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+- wake_up_interruptible(sk->sk_sleep);
++ wake_up_interruptible_sync(sk->sk_sleep);
+ sk_wake_async(sk, 2, POLL_OUT);
+ }
+ read_unlock(&sk->sk_callback_lock);
+ }
+
+@@ -1640,11 +1640,11 @@ static int unix_dgram_recvmsg(struct kio
+ err = 0;
+ unix_state_unlock(sk);
+ goto out_unlock;
+ }
+
+- wake_up_interruptible(&u->peer_wait);
++ wake_up_interruptible_sync(&u->peer_wait);
+
+ if (msg->msg_name)
+ unix_copy_addr(msg, skb->sk);
+
+ if (size > skb->len)
diff --git a/recipes/linux/linux-2.6.24/time.h.patch b/recipes/linux/linux-2.6.23/time.h.patch
index fd22f3a01d..fd22f3a01d 100644
--- a/recipes/linux/linux-2.6.24/time.h.patch
+++ b/recipes/linux/linux-2.6.23/time.h.patch
diff --git a/recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig b/recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig
index 77cb9fafe5..77cb9fafe5 100644
--- a/recipes/linux/linux-2.6.25.20/ronetix-pm9261/defconfig
+++ b/recipes/linux/linux-2.6.25/ronetix-pm9261/defconfig
diff --git a/recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig b/recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig
index c7171cd4ae..c7171cd4ae 100644
--- a/recipes/linux/linux-2.6.25.20/ronetix-pm9263/defconfig
+++ b/recipes/linux/linux-2.6.25/ronetix-pm9263/defconfig
diff --git a/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch b/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch
deleted file mode 100644
index 6b8aaf4445..0000000000
--- a/recipes/linux/linux-2.6.29/micro2440/0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From a4cba996cb77da4afc26c35402a70c3f008afe96 Mon Sep 17 00:00:00 2001
-From: Michel Pollet <buserror@gmail.com>
-Date: Sat, 14 Mar 2009 10:34:32 +0000
-Subject: [PATCH] 920T: Temp(?) fix for the 40 relocation binutils problem
-
-This prevents the modules failing to load when made
-with modern toolchains. There is no way to prevent binutils
-to generate these relocations, and on the 920t they are
-in fact not needed. So this patch just skip them.
-
-Signed-off-by: Michel Pollet <buserror@gmail.com>
----
- arch/arm/include/asm/elf.h | 1 +
- arch/arm/kernel/module.c | 7 +++++++
- 2 files changed, 8 insertions(+), 0 deletions(-)
-
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index a58378c..ce3b36e 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -50,6 +50,7 @@ typedef struct user_fp elf_fpregset_t;
- #define R_ARM_ABS32 2
- #define R_ARM_CALL 28
- #define R_ARM_JUMP24 29
-+#define R_ARM_V4BX 40
-
- /*
- * These are used to set parameters in the core dumps.
-diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index dab48f2..fa03392 100644
---- a/arch/arm/kernel/module.c
-+++ b/arch/arm/kernel/module.c
-@@ -132,6 +132,13 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- *(u32 *)loc |= offset & 0x00ffffff;
- break;
-
-+#ifdef CONFIG_CPU_ARM920T
-+ /* modern toolchain generate V4BX for the modules, and there is no
-+ * way to skip them being generated in the .ko, so in our case, we just
-+ * can ignore them */
-+ case R_ARM_V4BX: /* Ignore these sections */
-+ break;
-+#endif
- default:
- printk(KERN_ERR "%s: unknown relocation: %u\n",
- module->name, ELF32_R_TYPE(rel->r_info));
---
-1.5.6.3
-
diff --git a/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch b/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch
deleted file mode 100644
index bfad6d80eb..0000000000
--- a/recipes/linux/linux-2.6.29/micro2440/0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 11c0b33d2a046a37bcd96528faa0e93359ef4a4b Mon Sep 17 00:00:00 2001
-From: Herbert Xu <herbert@gondor.apana.org.au>
-Date: Thu, 26 Mar 2009 00:59:10 -0700
-Subject: [PATCH] GRO: Disable GRO on legacy netif_rx path
-
-When I fixed the GRO crash in the legacy receive path I used
-napi_complete to replace __napi_complete. Unfortunately they're
-not the same when NETPOLL is enabled, which may result in us
-not calling __napi_complete at all.
-
-What's more, we really do need to keep the __napi_complete call
-within the IRQ-off section since in theory an IRQ can occur in
-between and fill up the backlog to the maximum, causing us to
-lock up.
-
-Since we can't seem to find a fix that works properly right now,
-this patch reverts all the GRO support from the netif_rx path.
-
-Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Michel Pollet <buserror@gmail.com>
----
- net/core/dev.c | 9 +++------
- 1 files changed, 3 insertions(+), 6 deletions(-)
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index e3fe5c7..e438f54 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -2588,18 +2588,15 @@ static int process_backlog(struct napi_struct *napi, int quota)
- local_irq_disable();
- skb = __skb_dequeue(&queue->input_pkt_queue);
- if (!skb) {
-+ __napi_complete(napi);
- local_irq_enable();
-- napi_complete(napi);
-- goto out;
-+ break;
- }
- local_irq_enable();
-
-- napi_gro_receive(napi, skb);
-+ netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
-
-- napi_gro_flush(napi);
--
--out:
- return work;
- }
-
---
-1.5.6.3
-
diff --git a/recipes/linux/linux-sgh-i900/sgh-i900-support.patch b/recipes/linux/linux-sgh-i900/sgh-i900-support.patch
new file mode 100644
index 0000000000..28d65938a1
--- /dev/null
+++ b/recipes/linux/linux-sgh-i900/sgh-i900-support.patch
@@ -0,0 +1,13031 @@
+diff -ur linux-2.6.32/arch/arm/Kconfig kernel/arch/arm/Kconfig
+--- linux-2.6.32/arch/arm/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/Kconfig 2009-12-12 16:09:25.656278659 +0200
+@@ -1502,6 +1502,112 @@
+ config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
++config PXA_DVFM
++ bool "PXA Processor High Level DVFM support"
++ depends on PM
++ default y
++ help
++	  This enables the dynamic frequency and voltage change framework
++	  for the PXA processor series.
++
++config PXA_MIPSRAM
++ bool "PXA MIPSRAM monitoring support"
++ default n
++ help
++ Enable MIPS RAM monitoring for process switching implemented in
++ the scheduler
++
++config PXA3xx_DVFM
++ bool "PXA3xx Processor DVFM support"
++ depends on PM && PXA3xx && PXA_DVFM
++# select PXA3xx_ARAVA
++# select PXA3xx_MICCO
++ default y
++ help
++	  This implements the dynamic frequency and voltage change features
++	  specifically for the PXA3xx processor.
++
++config PXA3xx_DVFM_STATS
++ bool "PXA3xx/PXA930 Processor DVFM Statistics support"
++ depends on PXA3xx_DVFM
++ select RELAY
++ select DEBUG_FS
++ default y
++ help
++ This is used to collect statistics during the dynamic frequency
++ and voltage changes
++
++config PXA3xx_PMU
++ bool "PXA3xx/PXA930 Processor PMU support"
++ default y
++ help
++	  PXA3xx/PXA930 provide a Performance Monitor Unit to report
++	  CPU statistics.
++
++config PXA3xx_PRM
++ bool "PXA3xx Processor Profiler Resource Manager"
++ depends on PXA3xx_DVFM && PXA3xx_PMU
++ default y
++ help
++ This enables the PXA3xx Processor Profiler Resource Manager
++
++config IPM
++ bool "Marvell(R) Scalable Power Management Profiler"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Support Profiler of Marvell(R) Scalable Power Management
++
++config IPMC
++ bool "Marvell(R) Scalable Power Management Userspace Daemon"
++ depends on PXA3xx_PRM
++ default n
++ help
++ Support Userspace Daemon of Marvell(R) Scalable Power Management
++
++config BPMD
++ bool "Borqs Scalable Power Management Kernel Daemon"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Kernel Daemon of Borqs Scalable Power Management
++
++config TEST_BPMD
++ bool "Borqs Scalable Power Management Test Module"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Test Module of Borqs Scalable Power Management
++
++config IPM_DEEPIDLE
++ bool "PXA3xx/PXA930 Processor Deep Idle support"
++ depends on IPM
++ default y
++ help
++ This enables the kernel support for PXA3xx/PXA930
++ Processor Deep Idle (D0CS Idle)
++
++config IPM_D2IDLE
++ bool "Support PXA3xx/PXA930 Processor D2 Mode as Idle"
++ depends on IPM && PXA_32KTIMER
++ default y
++ help
++	  This enables kernel support for PXA3xx/PXA930 D2 idle
++
++config PERIPHERAL_STATUS
++ bool "Support list peripheral status of pm"
++ depends on PM
++ default y
++ help
++	  This enables kernel support for peripheral status calculation
++
++config IPM_CGIDLE
++ bool "Support PXA935 Processor Clock Gated Mode as Idle"
++ depends on IPM && PXA_32KTIMER
++ default y
++ help
++	  This enables kernel support for PXA935 clock gated idle
++
+ endmenu
+
+ source "net/Kconfig"
+diff -ur linux-2.6.32/arch/arm/mach-pxa/Kconfig kernel/arch/arm/mach-pxa/Kconfig
+--- linux-2.6.32/arch/arm/mach-pxa/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/Kconfig 2009-12-12 16:09:26.426281936 +0200
+@@ -27,6 +27,12 @@
+ bool "PXA950 (codename Tavor-PV2)"
+ select CPU_PXA930
+
++config PXA3xx_PMIC
++ bool "PXA3xx PMIC support"
++ default y
++ help
++ PMIC support
++
+ endmenu
+
+ endif
+@@ -303,6 +309,18 @@
+ select HAVE_PWM
+ select PXA_HAVE_BOARD_IRQS
+
++config MACH_SGH_I900
++ bool "Samsung SGH-i900 (Omnia) phone"
++ select PXA3xx
++ select CPU_PXA310
++ select HAVE_PWM
++
++config MACH_SGH_I780
++ bool "Samsung SGH-i780 phone"
++ select PXA3xx
++ select CPU_PXA310
++ select HAVE_PWM
++
+ config MACH_LITTLETON
+ bool "PXA3xx Form Factor Platform (aka Littleton)"
+ select PXA3xx
+diff -ur linux-2.6.32/arch/arm/mach-pxa/Makefile kernel/arch/arm/mach-pxa/Makefile
+--- linux-2.6.32/arch/arm/mach-pxa/Makefile 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/Makefile 2009-12-12 16:09:26.426281936 +0200
+@@ -5,6 +5,15 @@
+ # Common support (must be linked before board specific support)
+ obj-y += clock.o devices.o generic.o irq.o \
+ time.o reset.o
++obj-$(CONFIG_PXA_DVFM) += dvfm.o
++ifeq ($(CONFIG_PXA3xx), y)
++ obj-$(CONFIG_PXA3xx_PMIC) += pxa3xx_pmic.o
++ obj-$(CONFIG_PXA3xx_DVFM) += pxa3xx_dvfm.o pxa3xx_dvfm_ll.o
++ obj-$(CONFIG_PXA3xx_PMU) += pmu.o pmu_ll.o
++ obj-$(CONFIG_PXA3xx_PRM) += prm.o
++ obj-$(CONFIG_BPMD) += bpm.o bpm_prof.o
++endif
++
+ obj-$(CONFIG_PM) += pm.o sleep.o standby.o
+
+ ifeq ($(CONFIG_CPU_FREQ),y)
+@@ -66,6 +75,8 @@
+ obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
+ obj-$(CONFIG_MACH_TREO680) += treo680.o
+ obj-$(CONFIG_ARCH_VIPER) += viper.o
++obj-$(CONFIG_MACH_SGH_I900) += sgh_i780_i900.o sgh_smd.o sgh_rpc.o
++obj-$(CONFIG_MACH_SGH_I780) += sgh_i780_i900.o sgh_smd.o sgh_rpc.o
+
+ ifeq ($(CONFIG_MACH_ZYLONITE),y)
+ obj-y += zylonite.o
+diff -ur linux-2.6.32/arch/arm/mach-pxa/bpm.c kernel/arch/arm/mach-pxa/bpm.c
+--- linux-2.6.32/arch/arm/mach-pxa/bpm.c 2009-12-13 12:57:59.831957275 +0200
++++ kernel/arch/arm/mach-pxa/bpm.c 2009-12-12 16:09:26.429614458 +0200
+@@ -0,0 +1,1814 @@
++/*
++ * linux/arch/arm/mach-pxa/bpm.c
++ *
++ * Provide bpm thread to scale system voltage & frequency dynamically.
++ *
++ * Copyright (C) 2008 Borqs Corporation.
++ *
++ * Author: Emichael Li <emichael.li@borqs.com>
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++ */
++
++#include <linux/kernel.h>
++#include <mach/prm.h>
++#include <mach/dvfm.h>
++#include <mach/mspm_prof.h>
++#include <linux/sysdev.h>
++#include <linux/delay.h>
++#include <mach/bpm.h>
++#include <mach/hardware.h>
++#include <mach/pxa3xx-regs.h>
++#include <linux/list.h>
++#include <asm/io.h>
++#include <asm/mach-types.h>
++#include <linux/freezer.h>
++#include <mach/regs-ost.h>
++#ifdef CONFIG_ANDROID_POWER
++#include <linux/android_power.h>
++#endif
++
++#define DEBUG
++
++#ifdef DEBUG
++#define PM_BUG_ON(condition) \
++ do { \
++ if (unlikely(condition)) { \
++ printk(KERN_ERR "BUG: failure at %s:%d/%s()!\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ WARN_ON(1); \
++ } \
++ } while(0)
++#define DPRINTK(fmt,args...) \
++ do { \
++ if (g_bpm_log_level) \
++ printk(KERN_ERR "%s: " fmt, __FUNCTION__ , ## args); \
++ } while (0)
++#else
++#define PM_BUG_ON(condition) \
++ do { \
++ if (unlikely(condition)) { \
++ printk(KERN_ERR "BUG: failure at %s:%d/%s()!\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ } \
++ } while(0)
++#define DPRINTK(fmt,args...) \
++ do {} while (0)
++#endif
++
++/*****************************************************************************/
++/* */
++/* Policy variables */
++/* */
++/*****************************************************************************/
++#define REDUCE_624M_DUTYCYCLE (1)
++
++#define BPM_FREQ_POLICY_NUM (3)
++#define BPM_PROFILER_WINDOW (100)
++#define SYSTEM_BOOTUP_TIME (15000)
++#define BPM_MAX_OP_NUM (10)
++
++struct bpm_freq_bonus_arg {
++ int mips;
++ int mem_stall;
++};
++
++struct bpm_freq_policy {
++ int lower[BPM_FREQ_POLICY_NUM];
++ int higher[BPM_FREQ_POLICY_NUM];
++};
++
++#define CONSTRAINT_ID_LEN (32)
++struct bpm_cons {
++ struct list_head list;
++ char sid[CONSTRAINT_ID_LEN];
++ int count;
++ unsigned long ms;
++ unsigned long tmp_ms;
++ unsigned long tm;
++};
++
++struct bpm_cons_head {
++ struct list_head list;
++};
++
++/* manage all the ops which are supported by the hardware */
++static struct dvfm_op g_dyn_ops[BPM_MAX_OP_NUM];
++static spinlock_t g_dyn_ops_lock = SPIN_LOCK_UNLOCKED;
++
++static struct bpm_cons_head g_bpm_cons[BPM_MAX_OP_NUM];
++
++/* map the op from active ops to g_dyn_ops[] */
++static int g_active_ops_map[BPM_MAX_OP_NUM];
++static int g_active_ops_num;
++static int g_active_cur_idx = -1;
++static int g_prefer_op_idx;
++static int g_active_bonus[BPM_MAX_OP_NUM][BPM_MAX_OP_NUM * 2 - 1];
++struct bpm_freq_policy g_active_policy[BPM_MAX_OP_NUM];
++
++/*****************************************************************************/
++/* */
++/* Framework Supportted Variables */
++/* */
++/*****************************************************************************/
++
++int (*pipm_start_pmu) (void *) = NULL;
++EXPORT_SYMBOL(pipm_start_pmu);
++int (*pipm_stop_pmu)(void) = NULL;
++EXPORT_SYMBOL(pipm_stop_pmu);
++
++static int g_bpm_thread_exit;
++int g_bpm_enabled;
++static wait_queue_head_t g_bpm_enabled_waitq;
++
++static int g_profiler_window = BPM_PROFILER_WINDOW;
++static int g_bpm_log_level = 1;
++struct completion g_bpm_thread_over;
++
++extern struct sysdev_class cpu_sysdev_class;
++
++static struct bpm_event_queue g_bpm_event_queue;
++static spinlock_t g_bpm_event_queue_lock = SPIN_LOCK_UNLOCKED;
++
++#ifdef CONFIG_TEST_BPMD
++static int g_cpuload_mode;
++#endif
++
++static int dvfm_dev_idx;
++
++extern int __dvfm_enable_op(int index, int dev_idx);
++extern int __dvfm_disable_op2(int index, int dev_idx);
++extern int cur_op;
++extern struct info_head dvfm_trace_list;
++
++extern int g_dvfm_disabled;
++
++#ifdef CONFIG_MTD_NAND_HSS_FIX
++extern atomic_t nand_in_cmd;
++#endif
++/*****************************************************************************/
++/* */
++/* Blink Variables */
++/* */
++/*****************************************************************************/
++#define DVFM_BLINK_OWNER_LEN (16)
++
++struct dvfm_blink_info {
++ int time;
++ char name[DVFM_BLINK_OWNER_LEN];
++};
++
++static int g_dvfm_blink = 0;
++static struct timer_list g_dvfm_blink_timer;
++static struct dvfm_blink_info g_dvfm_binfo;
++static unsigned long g_dvfm_blink_timeout = 0;
++
++/*****************************************************************************/
++/* */
++/* android power interface */
++/* */
++/*****************************************************************************/
++static int g_android_suspended = 0;
++
++#ifdef CONFIG_ANDROID_POWER
++void bpm_android_suspend_handler(android_early_suspend_t *h)
++{
++ unsigned long flags;
++ local_irq_save(flags);
++ g_android_suspended = 1;
++ local_irq_restore(flags);
++}
++
++void bpm_android_resume_handler(android_early_suspend_t *h)
++{
++ unsigned long flags;
++ local_irq_save(flags);
++ g_android_suspended = 0;
++ local_irq_restore(flags);
++}
++
++static android_early_suspend_t bpm_early_suspend = {
++ .level = 98,
++ .suspend = bpm_android_suspend_handler,
++ .resume = bpm_android_resume_handler,
++};
++#endif
++
++static inline int is_out_d0cs(void)
++{
++#ifdef CONFIG_PXA3xx_DVFM
++ extern int out_d0cs;
++ return out_d0cs;
++#endif
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD Event Queue */
++/* */
++/*****************************************************************************/
++
++static int bpmq_init(void)
++{
++ g_bpm_event_queue.head = g_bpm_event_queue.tail = 0;
++ g_bpm_event_queue.len = 0;
++ init_waitqueue_head(&g_bpm_event_queue.waitq);
++ return 0;
++}
++
++static int bpmq_clear(void)
++{
++ unsigned long flag;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ g_bpm_event_queue.head = g_bpm_event_queue.tail = 0;
++ g_bpm_event_queue.len = 0;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ return 0;
++}
++
++static int bpmq_get(struct bpm_event *e)
++{
++ unsigned long flag;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ if (!g_bpm_event_queue.len) {
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++ printk(KERN_ERR "Logic error, please check bpmq_empty()\n");
++ return -1;
++ }
++ memcpy(e, g_bpm_event_queue.bpmes + g_bpm_event_queue.tail,
++ sizeof(struct bpm_event));
++ g_bpm_event_queue.len--;
++ g_bpm_event_queue.tail =
++ (g_bpm_event_queue.tail + 1) % MAX_BPM_EVENT_NUM;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ return 0;
++}
++
++static int bpmq_put(struct bpm_event *e)
++{
++ unsigned long flag;
++ static int err_cnt = 0;
++
++ if (unlikely(0 == g_bpm_enabled))
++ return 0;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ if (g_bpm_event_queue.len == MAX_BPM_EVENT_NUM) {
++ if (++err_cnt > 0) {
++			printk(KERN_ERR "bpm queue overflow!\n");
++ show_state();
++ printk(KERN_ERR "send event many times instantly?");
++ dump_stack();
++ }
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++ return -1;
++ }
++ memcpy(g_bpm_event_queue.bpmes + g_bpm_event_queue.head, e,
++ sizeof(struct bpm_event));
++ g_bpm_event_queue.len++;
++ g_bpm_event_queue.head =
++ (g_bpm_event_queue.head + 1) % MAX_BPM_EVENT_NUM;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ wake_up_interruptible(&g_bpm_event_queue.waitq);
++
++ return 0;
++}
++
++static __inline int bpmq_empty(void)
++{
++ return (g_bpm_event_queue.len > 0) ? 0 : 1;
++}
++
++int bpm_event_notify(int type, int kind, void *info, unsigned int info_len)
++{
++ struct bpm_event event;
++ int len = 0;
++
++ if (info_len > INFO_SIZE)
++ len = INFO_SIZE;
++ else if ((info_len < INFO_SIZE) && (info_len > 0))
++ len = info_len;
++ memset(&event, 0, sizeof(struct bpm_event));
++ event.type = type;
++ event.kind = kind;
++ if ((len > 0) && (info != NULL)) {
++ memcpy(event.info, info, len);
++ }
++ if (0 != bpmq_put(&event)) {
++ len = -1;
++ }
++
++/* DPRINTK("type: %d kind: %d, len(ret): %d\n", type, kind, len); */
++ return len;
++}
++
++EXPORT_SYMBOL(bpm_event_notify);
++
++/*****************************************************************************/
++/* */
++/* BPMD PMU Interface */
++/* */
++/*****************************************************************************/
++
++static int bpm_start_pmu(void)
++{
++ int ret = -ENXIO;
++ struct ipm_profiler_arg pmu_arg;
++
++ if (pipm_start_pmu != NULL) {
++ pmu_arg.size = sizeof(struct ipm_profiler_arg);
++/* pmu_arg.flags = IPM_IDLE_PROFILER | IPM_PMU_PROFILER; */
++ pmu_arg.flags = IPM_IDLE_PROFILER;
++ pmu_arg.window_size = g_profiler_window;
++
++ pmu_arg.pmn0 = PXA3xx_EVENT_EXMEM;
++ pmu_arg.pmn1 = PXA3xx_EVENT_DMC_NOT_EMPTY;
++ pmu_arg.pmn2 = PMU_EVENT_POWER_SAVING;
++ pmu_arg.pmn3 = PMU_EVENT_POWER_SAVING;
++
++ ret = pipm_start_pmu(&pmu_arg);
++ } else {
++ printk(KERN_CRIT "No profiler\n");
++ PM_BUG_ON(1);
++ }
++
++ return ret;
++}
++
++static int bpm_stop_pmu(void)
++{
++ pipm_stop_pmu();
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD POLICY */
++/* */
++/*****************************************************************************/
++
++static int bpm_dump_policy(void)
++{
++#define TMP_BUF_SIZE (4096)
++ int i, j;
++ char *buf = kmalloc(TMP_BUF_SIZE, GFP_KERNEL);
++ char *s = NULL;
++
++ if (NULL == buf) {
++ printk(KERN_ERR "Can not alloc memory\n");
++ return 0;
++ }
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "--------------BPM DUMP POLICY BEGIN--------------\n");
++ s += sprintf(s, "dyn_boot_op = %d\n", dvfm_get_defop());
++ s += sprintf(s, "g_active_ops_maps:\n");
++
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i)
++ s += sprintf(s, "%8d ", g_active_ops_map[i]);
++ s += sprintf(s, "\n");
++
++ s += sprintf(s, "g_active_ops_num: %d\n", g_active_ops_num);
++ s += sprintf(s, "g_active_cur_idx: %d\n", g_active_cur_idx);
++
++ s += sprintf(s, "g_active_policy:\n");
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ for (j = 0; j < BPM_FREQ_POLICY_NUM; ++j) {
++ s += sprintf(s, "%8d ", g_active_policy[i].lower[j]);
++ }
++
++ for (j = 0; j < BPM_FREQ_POLICY_NUM; ++j) {
++ s += sprintf(s, "%8d ", g_active_policy[i].higher[j]);
++ }
++ s += sprintf(s, "\n");
++ }
++
++ DPRINTK("%s", buf);
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "g_active_bonus:\n");
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ for (j = 0; j < BPM_MAX_OP_NUM * 2 - 1; ++j) {
++ s += sprintf(s, "%8d ", g_active_bonus[i][j]);
++ }
++ s += sprintf(s, "\n");
++ }
++
++ DPRINTK("%s", buf);
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "g_dyn_ops num: %d\n",
++ sizeof(g_dyn_ops) / sizeof(struct dvfm_op));
++
++ s += sprintf(s, "g_dyn_ops:\n");
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ s += sprintf(s, "%8d %8d %8d %s\n",
++ g_dyn_ops[i].index,
++ g_dyn_ops[i].count,
++ g_dyn_ops[i].cpu_freq, g_dyn_ops[i].name);
++ }
++ s += sprintf(s, "--------------BPM DUMP POLICY END----------------\n");
++
++ DPRINTK("%s", buf);
++
++ kfree(buf);
++ return 0;
++}
++
++static int build_active_ops(void)
++{
++ int i, j;
++ int pre_idx;
++ int cur_idx;
++ int pre_freq, cur_freq, pre_ratio;
++ int m, n;
++
++ memset(g_active_ops_map, -1, sizeof(g_active_ops_map));
++
++ for (i = 0, j = 0; i < BPM_MAX_OP_NUM; ++i) {
++ if (g_dyn_ops[i].count == 0 && g_dyn_ops[i].name != NULL
++ && !dvfm_check_active_op(g_dyn_ops[i].index))
++ g_active_ops_map[j++] = i;
++ }
++
++ g_active_ops_num = j;
++ g_active_cur_idx = -1;
++
++ memset(g_active_bonus, -1, sizeof(g_active_bonus));
++ memset(g_active_policy, -1, sizeof(g_active_policy));
++
++ for (i = 0; i < g_active_ops_num; ++i) {
++ g_active_policy[i].higher[0] = 80;
++ g_active_policy[i].higher[1] = 95;
++ g_active_policy[i].higher[2] = 100;
++
++ if (i == 0) {
++ memset(g_active_policy[i].lower, 0,
++ sizeof(g_active_policy[i].lower));
++ cur_idx = g_active_ops_map[i];
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ if (cur_freq == 60) {
++ g_active_policy[i].higher[0] = 90;
++ }
++ } else {
++ pre_idx = g_active_ops_map[i - 1];
++ cur_idx = g_active_ops_map[i];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 1].higher[0];
++
++ g_active_policy[i].lower[2] = pre_freq * pre_ratio / cur_freq;
++
++ if (i > 1) {
++ pre_idx = g_active_ops_map[i - 2];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 2].higher[0];
++
++ g_active_policy[i].lower[1] = pre_freq * pre_ratio / cur_freq;
++ } else {
++ g_active_policy[i].lower[1] = 0;
++ }
++
++ g_active_policy[i].lower[0] = 0;
++ }
++
++ for (j = 0; j < g_active_ops_num - 1 - i; ++j) {
++ g_active_bonus[i][j] = 0;
++ }
++
++ m = g_active_ops_num - 1;
++ n = 0;
++ for (j = m - i; j < 2 * g_active_ops_num - 1; ++j) {
++ g_active_bonus[i][j] = n < m ? n : m;
++ ++n;
++ }
++
++ }
++
++ g_active_policy[i - 1].higher[0] = 100;
++ g_active_policy[i - 1].higher[1] = 100;
++ g_active_policy[i - 1].higher[2] = 100;
++
++#if REDUCE_624M_DUTYCYCLE
++ cur_idx = g_active_ops_map[i - 1];
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ if (cur_freq == 624) {
++ if (i > 1) {
++ g_active_policy[i - 2].higher[0] = 96;
++ g_active_policy[i - 2].higher[1] = 100;
++
++ pre_idx = g_active_ops_map[i - 2];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 2].higher[0];
++
++ g_active_policy[i - 1].lower[2] = pre_freq * pre_ratio / cur_freq;
++ }
++ if (i > 2) {
++ g_active_policy[i - 3].higher[1] = 100;
++
++ pre_idx = g_active_ops_map[i - 3];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 3].higher[0];
++
++ g_active_policy[i - 1].lower[1] = pre_freq * pre_ratio / cur_freq;
++ }
++ }
++#endif
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* Platform Related */
++/* */
++/*****************************************************************************/
++
++int get_op_power_bonus(void)
++{
++ if (0 == g_active_cur_idx)
++ return 1;
++ else
++ return 0;
++}
++
++static int build_dyn_ops(void)
++{
++ int i;
++ int ret;
++ int op_num = 0;
++ int count, x;
++
++ struct op_info *info = NULL;
++ struct op_freq freq;
++
++ op_num = dvfm_op_count();
++ PM_BUG_ON(op_num > BPM_MAX_OP_NUM);
++
++ memset(&g_dyn_ops, -1, sizeof(g_dyn_ops));
++
++ for (i = 0; i < op_num; ++i) {
++ ret = dvfm_get_opinfo(i, &info);
++
++ PM_BUG_ON(ret);
++
++		/* calculate how many bits are set in the device word */
++ x = info->device;
++ for (count = 0; x; x = x & (x - 1), count++);
++
++ g_dyn_ops[i].index = i;
++ g_dyn_ops[i].count = count;
++
++ ret = dvfm_get_op_freq(i, &freq);
++ PM_BUG_ON(ret);
++
++ g_dyn_ops[i].cpu_freq = freq.cpu_freq;
++
++ g_dyn_ops[i].name = dvfm_get_op_name(i);
++
++ PM_BUG_ON(!g_dyn_ops[i].name);
++
++ INIT_LIST_HEAD(&(g_bpm_cons[i].list));
++ }
++
++ for (i = op_num; i < BPM_MAX_OP_NUM; ++i) {
++ g_dyn_ops[i].index = -1;
++ g_dyn_ops[i].count = 0;
++ g_dyn_ops[i].cpu_freq = 0;
++ g_dyn_ops[i].name = NULL;
++
++ INIT_LIST_HEAD(&(g_bpm_cons[i].list));
++ }
++
++ return 0;
++}
++
++static int get_dyn_idx(int active_idx)
++{
++ int t;
++ t = g_active_ops_map[active_idx];
++ return g_dyn_ops[t].index;
++}
++
++static int get_cur_freq(void)
++{
++ PM_BUG_ON(g_active_cur_idx == -1);
++ return g_dyn_ops[get_dyn_idx(g_active_cur_idx)].cpu_freq;
++}
++
++static int calc_new_idx(int bonus)
++{
++ int new_idx;
++
++ new_idx =
++ g_active_bonus[g_active_cur_idx][bonus + g_active_ops_num - 1];
++
++ return new_idx;
++}
++
++static int calc_bonus(struct bpm_freq_bonus_arg *parg)
++{
++ int i;
++ int bonus = 0;
++ int mem_stall = parg->mem_stall;
++ int mipsload = parg->mips * 100 / get_cur_freq();
++ int cpuload = mipsload > 100 ? 100 : mipsload;
++
++ PM_BUG_ON(cpuload > 100 || cpuload < 0);
++
++ for (i = 0; i < BPM_FREQ_POLICY_NUM; ++i) {
++ if (cpuload > g_active_policy[g_active_cur_idx].higher[i]) {
++ bonus += 1;
++// break; /* FIX ME: change the freq one by one */
++ }
++ }
++
++ for (i = BPM_FREQ_POLICY_NUM - 1; i >= 0; --i) {
++ if (cpuload < g_active_policy[g_active_cur_idx].lower[i]) {
++ bonus -= 1;
++// break; /* FIX ME: change the freq one by one */
++ }
++ }
++
++ /* memory bound */
++ if (bonus <= 0 && mem_stall > 17)
++ bonus = 1;
++
++ /* change to user_sleep policy ... */
++ if (g_android_suspended && (g_active_cur_idx <= 1))
++ bonus -= 1;
++
++ if (bonus > g_active_ops_num - 1)
++ bonus = g_active_ops_num - 1;
++ else if (bonus < 1 - g_active_ops_num)
++ bonus = 1 - g_active_ops_num;
++
++ return bonus;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD API */
++/* */
++/*****************************************************************************/
++
++static int bpm_change_op(int cur_idx, int new_idx)
++{
++ int ret;
++ struct dvfm_freqs freqs;
++ unsigned int oscr;
++
++ freqs.old = cur_idx;
++ freqs.new = new_idx;
++ oscr = OSCR;
++ ret = dvfm_set_op(&freqs, freqs.new, RELATION_STICK);
++ oscr = OSCR - oscr;
++ DPRINTK("old: %d cur: %d (tm: %d)\n", cur_idx, new_idx, oscr/325);
++/*
++ DPRINTK("ACCR: 0x%x ACSR: 0x%x AVCR: 0x%x SVCR: 0x%x CVCR: 0x%x\n",
++ ACCR, ACSR, AVCR, SVCR, CVCR);
++*/
++ return ret;
++}
++
++/* this function may need to be refactored later */
++int bpm_disable_op(int dyn_idx, int dev_idx)
++{
++ int i;
++ int ret = 0;
++ int cur_op_idx = -1, op_idx;
++ int next_op_idx = -1, next_active_idx = -1;
++
++ op_idx = g_dyn_ops[dyn_idx].index;
++
++ /* save current op information */
++ if (g_active_cur_idx != -1) {
++ cur_op_idx = get_dyn_idx(g_active_cur_idx);
++ }
++
++ if (!dvfm_check_active_op(op_idx) && g_active_ops_num == 1 &&
++ cur_op_idx == op_idx) {
++ printk(KERN_ERR "Can't disable this op %d\n", op_idx);
++ bpm_dump_policy();
++ return -1;
++ }
++
++ /*
++	 * There should be at least two enabled ops here; we cannot
++	 * reach this point when only one op is enabled.
++ */
++ if ((g_active_cur_idx != -1) && (g_active_ops_num > 1)) {
++ if (g_active_cur_idx == (g_active_ops_num - 1)) {
++ next_op_idx = get_dyn_idx(g_active_cur_idx - 1);
++ PM_BUG_ON((g_active_cur_idx - 1) < 0);
++ if ((g_active_cur_idx - 1) < 0) {
++ printk(KERN_ERR "err: %d %d\n", g_active_cur_idx, g_active_ops_num);
++ bpm_dump_policy();
++ }
++ } else {
++ next_op_idx = get_dyn_idx(g_active_cur_idx + 1);
++ PM_BUG_ON((g_active_cur_idx + 1) > (g_active_ops_num - 1));
++ if ((g_active_cur_idx + 1) > (g_active_ops_num - 1)) {
++ printk(KERN_ERR "err2: %d %d\n", g_active_cur_idx, g_active_ops_num);
++ bpm_dump_policy();
++ }
++ }
++ }
++
++ g_dyn_ops[dyn_idx].count++;
++
++ __dvfm_disable_op2(op_idx, dev_idx);
++
++ if (!dvfm_check_active_op(op_idx) && g_dyn_ops[dyn_idx].count == 1) {
++ build_active_ops();
++ }
++
++ if (cur_op_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == cur_op_idx) {
++ g_active_cur_idx = i;
++ break;
++ }
++ }
++
++ /* the disabled op is previous op, change to another op */
++ if (g_active_cur_idx == -1) {
++
++ /* find next op */
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == next_op_idx) {
++ next_active_idx = i;
++ break;
++ }
++ }
++
++ PM_BUG_ON(cur_op_idx != op_idx);
++ PM_BUG_ON(next_op_idx != get_dyn_idx(next_active_idx));
++ g_active_cur_idx = next_active_idx;
++ ret = bpm_change_op(cur_op_idx, next_op_idx);
++ PM_BUG_ON(ret);
++ }
++ }
++
++ return ret;
++}
++
++int bpm_enable_op(int dyn_idx, int dev_idx)
++{
++ int i, cur_op_idx = -1;
++
++ if (g_dyn_ops[dyn_idx].count <= 0) {
++		printk(KERN_ERR "did you disable this op before?\n");
++ return -1;
++ }
++
++ /* save current op information */
++ if (g_active_cur_idx != -1) {
++ cur_op_idx = get_dyn_idx(g_active_cur_idx);
++ }
++
++ g_dyn_ops[dyn_idx].count--;
++
++ if (g_dyn_ops[dyn_idx].count == 0)
++ build_active_ops();
++
++ __dvfm_enable_op(g_dyn_ops[dyn_idx].index, dev_idx);
++
++ if (cur_op_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == cur_op_idx) {
++ g_active_cur_idx = i;
++ break;
++ }
++ }
++ }
++
++ return 0;
++}
++
++int bpm_enable_op_name(char *name, int dev_idx, char *sid)
++{
++ unsigned long flag;
++ int ret = 0, new_idx = -1;
++ int i, found;
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if (g_dyn_ops[i].name != NULL &&
++ (!strncmp(name, g_dyn_ops[i].name, sizeof(name)))) {
++ ret = bpm_enable_op(i, dev_idx);
++
++ if (!ret) {
++ found = 0;
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ if (!strncmp(p->sid, sid, CONSTRAINT_ID_LEN - 1)) {
++ found = 1;
++ PM_BUG_ON(p->count <= 0);
++ p->count--;
++ if (p->tmp_ms) {
++ p->tm++;
++ p->ms += (OSCR / 3250 - p->tmp_ms);
++ }
++ break;
++ }
++ }
++ PM_BUG_ON(!found);
++ } else {
++				printk(KERN_ERR "%s does not use the PM interface correctly!\n", sid);
++ PM_BUG_ON(1);
++ }
++ break;
++ }
++ }
++
++ if (i == sizeof(g_dyn_ops) / sizeof(struct dvfm_op)) {
++// printk(KERN_ERR "Cannot find and enable op name %s\n", name);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++	/* Change to preferred op */
++ if (g_prefer_op_idx != cur_op && g_active_cur_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == g_prefer_op_idx) {
++ new_idx = i;
++ break;
++ }
++ }
++
++ if (new_idx != -1) {
++ ret = bpm_change_op(get_dyn_idx(g_active_cur_idx), get_dyn_idx(new_idx));
++ if (0 == ret)
++ g_active_cur_idx = new_idx;
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++ }
++ }
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return ret;
++}
++
++int bpm_disable_op_name(char *name, int dev_idx, char *sid)
++{
++ unsigned long flag;
++ int ret = -1;
++ int i;
++ int find = 0;
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if (g_dyn_ops[i].name != NULL &&
++ (!strncmp(name, g_dyn_ops[i].name, sizeof(name)))) {
++ ret = bpm_disable_op(i, dev_idx);
++
++ if (!ret) {
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ if (!strncmp(p->sid, sid, CONSTRAINT_ID_LEN - 1)) {
++ p->count++;
++ p->tmp_ms = OSCR / 3250;
++ find = 1;
++ break;
++ }
++ }
++
++ if (find == 0) {
++ p = (struct bpm_cons *)kzalloc(sizeof(struct bpm_cons), GFP_KERNEL);
++ strncpy(p->sid, sid, CONSTRAINT_ID_LEN - 1);
++ p->count = 1;
++ list_add_tail(&(p->list), &(g_bpm_cons[i].list));
++ }
++ }
++ break;
++ }
++ }
++
++ if (i == sizeof(g_dyn_ops) / sizeof(struct dvfm_op)) {
++// printk(KERN_ERR "Cannot find and disable op name %s\n", name);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return ret;
++}
++
++static int handle_profiler_arg(struct bpm_freq_bonus_arg *parg)
++{
++ int bonus;
++ int new_idx;
++ unsigned long flag;
++ int cur_dyn_idx, new_dyn_idx;
++
++ if (g_dvfm_blink)
++ return 0;
++
++ /*
++ * bpm_enable_op_name() and bpm_disable_op_name() will update
++ * g_dyn_ops[] and g_active_xxx[], and then scale the op, so
++ * we need to avoid the conflict.
++	 * The code below must not call schedule(), even indirectly.
++ */
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ if (0 == g_bpm_enabled) {
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++ return 0;
++ }
++
++ bonus = calc_bonus(parg);
++ new_idx = calc_new_idx(bonus);
++
++ cur_dyn_idx = get_dyn_idx(g_active_cur_idx);
++ new_dyn_idx = get_dyn_idx(new_idx);
++
++/*
++ DPRINTK
++ ("bonus:%d, cur_idx: %d, new_idx: %d, old_hw_idx: %d, new_hw_idx: %d\n",
++ bonus, g_active_cur_idx, new_idx, cur_dyn_idx, new_dyn_idx);
++*/
++ if (new_idx != g_active_cur_idx) {
++ if (!bpm_change_op(cur_dyn_idx, new_dyn_idx)) {
++ g_active_cur_idx = new_idx;
++ } else {
++ DPRINTK("scaling freq later!\n");
++ }
++ g_prefer_op_idx = new_dyn_idx;
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return 0;
++}
++
++static void dvfm_blink_timer_handler(unsigned long data)
++{
++ unsigned long flag;
++
++ local_irq_save(flag);
++
++ g_dvfm_blink = 0;
++ g_dvfm_blink_timeout = 0;
++ memset(&g_dvfm_binfo, 0, sizeof(struct dvfm_blink_info));
++
++ local_irq_restore(flag);
++}
++
++static int handle_blink(struct bpm_event *pevent)
++{
++ int new_idx;
++ unsigned long flag;
++ int cur_dyn_idx, new_dyn_idx;
++ struct dvfm_blink_info *pinfo = NULL;
++
++ if (0 == g_bpm_enabled)
++ return 0;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ pinfo = (struct dvfm_blink_info *)pevent->info;
++
++ DPRINTK("Blink: %d %lu %lu\n", g_dvfm_blink, g_dvfm_blink_timeout, jiffies + msecs_to_jiffies(pinfo->time));
++
++ if ((0 == g_dvfm_blink) || time_before(g_dvfm_blink_timeout, jiffies + msecs_to_jiffies(pinfo->time))) {
++
++ memcpy(&g_dvfm_binfo, pinfo, sizeof(struct dvfm_blink_info));
++
++ g_dvfm_blink_timeout = jiffies + msecs_to_jiffies(pinfo->time);
++ g_dvfm_blink = 1;
++ mod_timer(&g_dvfm_blink_timer, g_dvfm_blink_timeout);
++
++ new_idx = g_active_ops_num - 1;
++ cur_dyn_idx = get_dyn_idx(g_active_cur_idx);
++ new_dyn_idx = get_dyn_idx(new_idx);
++
++ if (new_dyn_idx > cur_dyn_idx) {
++ if (!bpm_change_op(cur_dyn_idx, new_dyn_idx)) {
++ g_active_cur_idx = new_idx;
++ g_prefer_op_idx = new_dyn_idx;
++ }
++ }
++ } else {
++ printk("Blink: %s already set and blink(%lu)\n", g_dvfm_binfo.name, g_dvfm_blink_timeout);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return 0;
++}
++
++static int handle_profiler(struct bpm_event *pevent)
++{
++ struct ipm_profiler_result *pinfo =
++ (struct ipm_profiler_result *)pevent->info;
++ struct bpm_freq_bonus_arg bonus_arg;
++ int mips = pinfo->mips;
++ int mem_stall = 0;
++
++#ifdef CONFIG_TEST_BPMD
++ static int cpuload = 10;
++ switch (g_cpuload_mode) {
++ case 0:
++ cpuload = mips * 100 / get_cur_freq();
++ break;
++ case 1:
++ cpuload = (cpuload == 10 ? 90 : 10);
++ break;
++ case 2:
++ cpuload = OSCR % 101;
++ break;
++ case 3:
++ cpuload = (OSCR & 0x1) ? 90 : 10;
++ break;
++ case 4:
++ cpuload = OSCR % 21;
++ break;
++ case 5:
++ cpuload = 80 + OSCR % 21;
++ break;
++ }
++ mips = cpuload * get_cur_freq() / 100;
++
++// DPRINTK("orig ratio: %d new ratio: %d\n", pinfo->busy_ratio, busy);
++#endif
++ DPRINTK("time_load: %d mips_load: %d (%d)\n", pinfo->busy_ratio, mips * 100 / get_cur_freq(), get_cur_freq());
++
++ /*
++ * Get PMU Data, bla bla bla...
++ */
++ bonus_arg.mips = mips;
++ bonus_arg.mem_stall = mem_stall;
++
++ handle_profiler_arg(&bonus_arg);
++
++ bpm_start_pmu();
++ return 0;
++}
++
++static int bpm_process_event(struct bpm_event *pevent)
++{
++ switch (pevent->type) {
++ case IPM_EVENT_PROFILER:
++ handle_profiler(pevent);
++ break;
++
++ case IPM_EVENT_BLINK:
++ handle_blink(pevent);
++ break;
++
++ default:
++ PM_BUG_ON(1);
++ }
++ return 0;
++}
++
++int bpm_pre_enter_d0csidle(int* op)
++{
++ unsigned long flag;
++	int ret = 0, new_dyn_idx;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ if (g_active_cur_idx != -1)
++ *op = get_dyn_idx(g_active_cur_idx);
++ else
++ *op = dvfm_get_defop();
++
++ new_dyn_idx = get_dyn_idx(0);
++ if (*op > new_dyn_idx) {
++ ret = bpm_change_op(*op, new_dyn_idx);
++
++ if ((0 == ret) && (-1 != g_active_cur_idx)) {
++ g_active_cur_idx = 0;
++ }
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++#ifdef CONFIG_MTD_NAND_HSS_FIX
++ if (!atomic_read(&nand_in_cmd))
++#endif
++ PM_BUG_ON(ret);
++
++ return ret;
++}
++
++int bpm_post_exit_d0csidle(int op)
++{
++ unsigned long flag;
++ int new_idx = -1;
++ int cur_dyn_op, new_dyn_op;
++ int i, ret;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ if (g_active_cur_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) >= op) {
++ new_idx = i;
++ break;
++ }
++ }
++
++ PM_BUG_ON(new_idx == -1);
++
++ cur_dyn_op = get_dyn_idx(g_active_cur_idx);
++ new_dyn_op = get_dyn_idx(new_idx);
++
++ PM_BUG_ON(cur_dyn_op != cur_op);
++
++ g_active_cur_idx = new_idx;
++ } else {
++ cur_dyn_op = cur_op;
++ new_dyn_op = dvfm_get_defop();
++ PM_BUG_ON(op != new_dyn_op);
++ }
++
++ PM_BUG_ON(cur_dyn_op > new_dyn_op);
++
++ if (cur_dyn_op != new_dyn_op) {
++ ret = bpm_change_op(cur_dyn_op, new_dyn_op);
++ PM_BUG_ON(ret);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return 0;
++}
++
++int bpm_set_active_op(const unsigned char* opname)
++{
++ int opname_idx = -1, i, cur_idx;
++ int ret = 0;
++ unsigned long flag;
++
++ if (-1 != g_active_cur_idx) {
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < g_active_ops_num; ++i) {
++ cur_idx = g_active_ops_map[i];
++ if (!strcmp(opname, g_dyn_ops[cur_idx].name)) {
++ opname_idx = i;
++ }
++ }
++
++ if(opname_idx != -1) {
++ if (g_active_cur_idx != opname_idx) {
++ ret = bpm_change_op(get_dyn_idx(g_active_cur_idx), get_dyn_idx(opname_idx));
++ g_active_cur_idx = opname_idx;
++ g_prefer_op_idx = get_dyn_idx(opname_idx);
++ PM_BUG_ON(ret);
++ }
++ } else
++ printk(KERN_WARNING "Cannot find %s, %s is disabled?\n", opname, opname);
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++ }
++
++ return ret;
++}
++/*****************************************************************************/
++/* */
++/* BPMD Thread */
++/* */
++/*****************************************************************************/
++
++static int change_to_active_op(void)
++{
++ unsigned long flag;
++ int ret = 0;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ g_active_cur_idx = g_active_ops_num - 1;
++ ret = bpm_change_op(dvfm_get_defop(), get_dyn_idx(g_active_cur_idx));
++ g_prefer_op_idx = cur_op;
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ PM_BUG_ON(ret);
++
++ return ret;
++}
++
++static int change_to_def_op(void)
++{
++ unsigned long flag;
++ int ret = 0;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ ret = bpm_change_op(get_dyn_idx(g_active_cur_idx), dvfm_get_defop());
++ g_prefer_op_idx = cur_op;
++
++ g_active_cur_idx = -1;
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ PM_BUG_ON(ret);
++
++ return ret;
++}
++
++static int bpm_start(void)
++{
++ int ret;
++
++ if (0 == g_bpm_enabled) {
++ bpmq_clear();
++ change_to_active_op();
++ ret = bpm_start_pmu();
++ if (ret) {
++ printk(KERN_ERR "Can't start_pmu, ret: %d\n", ret);
++ g_bpm_enabled = 0;
++ return ret;
++ }
++ g_bpm_enabled = 1;
++#ifdef DEBUG
++ bpm_dump_policy();
++#endif
++ wake_up_interruptible(&g_bpm_enabled_waitq);
++ } else {
++ printk(KERN_DEBUG "bpmd already enabled (%d)\n", g_bpm_enabled);
++ }
++
++ return 0;
++}
++
++extern int gpio_reset_work_around(void);
++static int bpm_stop(void)
++{
++ if (1 == g_bpm_enabled) {
++ bpm_stop_pmu();
++ if (machine_is_bstd())
++ gpio_reset_work_around();
++ else
++ change_to_def_op();
++ g_bpm_enabled = 0;
++ } else {
++ printk(KERN_DEBUG "bpmd already stopped (%d)\n", g_bpm_enabled);
++ }
++
++ return 0;
++}
++
++static int bpm_thread(void *data)
++{
++ int ret = 0;
++ struct bpm_event event;
++ struct task_struct *tsk = current;
++ struct sched_param param = {.sched_priority = 1 };
++
++ DEFINE_WAIT(wait);
++
++ if (g_dvfm_disabled)
++ goto thread_over;
++
++ daemonize("bpmd");
++ strcpy(tsk->comm, "bpmd");
++
++ allow_signal(SIGKILL);
++ sched_setscheduler(tsk, SCHED_FIFO, &param);
++
++ g_bpm_log_level = 0;
++
++ msleep(SYSTEM_BOOTUP_TIME);
++
++ ret = bpm_start();
++ PM_BUG_ON(ret);
++
++	DPRINTK("Beginning bpm daemon thread ...\n");
++
++ while (likely(!g_bpm_thread_exit)) {
++
++ if (unlikely(signal_pending(tsk))) {
++ printk(KERN_NOTICE "BPMD is killed by SIGKILL!\n");
++ break;
++ }
++
++// DPRINTK("g_bpm_enabled = %d, bpmq_empty = %d\n",
++// g_bpm_enabled, bpmq_empty());
++
++ if (likely(g_bpm_enabled)) {
++ if (likely(bpmq_empty())) {
++ prepare_to_wait(&g_bpm_event_queue.waitq, &wait,
++ TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&g_bpm_event_queue.waitq, &wait);
++ }
++
++ if (likely(!bpmq_empty())) {
++ ret = bpmq_get(&event);
++ PM_BUG_ON(ret);
++
++ bpm_process_event(&event);
++ }
++ } else {
++ prepare_to_wait(&g_bpm_enabled_waitq, &wait,
++ TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&g_bpm_enabled_waitq, &wait);
++ }
++ }
++
++ bpm_stop();
++
++thread_over:
++ complete_and_exit(&g_bpm_thread_over, 0);
++
++ printk(KERN_WARNING "bpm daemon thread exit!\n");
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD SYS Interface */
++/* */
++/*****************************************************************************/
++
++static ssize_t op_show(struct sys_device *sys_dev, char *buf)
++{
++ int cur_dyn_idx, len;
++
++ if (g_active_cur_idx != -1)
++ cur_dyn_idx = get_dyn_idx(g_active_cur_idx);
++ else
++ cur_dyn_idx = dvfm_get_defop();
++
++ PM_BUG_ON(cur_dyn_idx != cur_op);
++
++ len = dvfm_dump_op(cur_dyn_idx, buf);
++
++ return len;
++}
++
++static ssize_t op_store(struct sys_device *sys_dev, const char *buf, size_t len)
++{
++ int i;
++ int dyn_idx, new_dyn_idx, cur_dyn_idx, new_active_idx = -1;
++ unsigned long flag;
++ int res = 0;
++
++ sscanf(buf, "%u", &new_dyn_idx);
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < g_active_ops_num; ++i) {
++ dyn_idx = g_active_ops_map[i];
++ if (g_dyn_ops[dyn_idx].index == new_dyn_idx) {
++ new_active_idx = i;
++ break;
++ }
++ }
++
++ if (new_active_idx != -1) {
++ if (g_active_cur_idx != -1)
++ cur_dyn_idx = get_dyn_idx(g_active_cur_idx);
++ else
++ cur_dyn_idx = dvfm_get_defop();
++
++ res = bpm_change_op(cur_dyn_idx, new_dyn_idx);
++ g_prefer_op_idx = new_dyn_idx;
++
++ PM_BUG_ON(res);
++
++ g_active_cur_idx = new_active_idx;
++ } else {
++ printk(KERN_ERR "bpm is enabled, new dyn op:%d\n", new_dyn_idx);
++ printk(KERN_ERR "Cannot find new active op, please check it\n");
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return len;
++}
++
++SYSDEV_ATTR(op, 0644, op_show, op_store);
++
++static ssize_t ops_show(struct sys_device *sys_dev, char *buf)
++{
++ int len = 0;
++ char *p = NULL;
++ int i;
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if (g_dyn_ops[i].name != NULL) {
++ p = buf + len;
++ len += dvfm_dump_op(i, p);
++ }
++ }
++
++ return len;
++}
++
++SYSDEV_ATTR(ops, 0444, ops_show, NULL);
++
++static ssize_t enable_op_show(struct sys_device *sys_dev, char *buf)
++{
++ int len = 0;
++ char *p = NULL;
++ int i;
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if ((!g_dyn_ops[i].count) && (g_dyn_ops[i].name != NULL)) {
++ p = buf + len;
++ len += dvfm_dump_op(i, p);
++ }
++ }
++
++ return len;
++}
++
++static ssize_t enable_op_store(struct sys_device *sys_dev, const char *buf,
++ size_t len)
++{
++ int level;
++ char name[16];
++
++ if (len >= 16) {
++ printk(KERN_ERR "invalid parameter\n");
++ return len;
++ }
++
++ memset(name, 0, sizeof(name));
++ sscanf(buf, "%s %d", name, &level);
++
++ if (level)
++ bpm_enable_op_name(name, dvfm_dev_idx, "user-echo");
++ else
++ bpm_disable_op_name(name, dvfm_dev_idx, "user-echo");
++
++ return len;
++}
++
++SYSDEV_ATTR(enable_op, 0666, enable_op_show, enable_op_store);
++
++static ssize_t profiler_window_show(struct sys_device *sys_dev, char *buf)
++{
++ char *s = buf;
++
++ s += sprintf(s, "%d\n", g_profiler_window);
++
++ return (s - buf);
++}
++
++static ssize_t profiler_window_store(struct sys_device *sys_dev,
++ const char *buf, size_t n)
++{
++ sscanf(buf, "%u", &g_profiler_window);
++
++ if (g_profiler_window < 10 || g_profiler_window > 20000)
++		printk(KERN_ERR "please input a value in [10, 20000]\n");
++
++ return n;
++}
++
++SYSDEV_ATTR(profiler_window, 0644, profiler_window_show, profiler_window_store);
++
++static ssize_t bpm_show(struct sys_device *sys_dev, char *buf)
++{
++ char *s = buf;
++
++ if (g_bpm_enabled)
++ s += sprintf(s, "%s\n", "enabled");
++ else
++ s += sprintf(s, "%s\n", "disabled");
++
++ return (s - buf);
++}
++
++static ssize_t bpm_store(struct sys_device *sys_dev, const char *buf, size_t n)
++{
++ if (n >= strlen("enable") &&
++ strncmp(buf, "enable", strlen("enable")) == 0) {
++ bpm_start();
++ return n;
++ }
++
++ if (n >= strlen("disable") &&
++ strncmp(buf, "disable", strlen("disable")) == 0) {
++ bpm_stop();
++ return n;
++ }
++
++ printk(KERN_ERR "invalid input, please try \"enable\" or \"disable\"\n");
++ return n;
++}
++
++SYSDEV_ATTR(bpm, 0644, bpm_show, bpm_store);
++
++static ssize_t blink_show(struct sys_device *sys_dev, char *buf)
++{
++ char *s = buf;
++
++ if (g_dvfm_blink)
++ s += sprintf(s, "blink: %s\n", g_dvfm_binfo.name);
++ else
++ s += sprintf(s, "blink: no\n");
++
++ return (s - buf);
++}
++
++static ssize_t blink_store(struct sys_device *sys_dev, const char *buf, size_t len)
++{
++ struct dvfm_blink_info binfo;
++
++ if (len >= (DVFM_BLINK_OWNER_LEN - 1)) {
++ printk(KERN_ERR "%s sets an invalid parameter of blink\n", current->comm);
++ return len;
++ }
++
++ memset(binfo.name, 0, sizeof(binfo.name));
++ sscanf(buf, "%s %d %*s", binfo.name, &binfo.time);
++
++ DPRINTK("blink: %s %d\n", binfo.name, binfo.time);
++
++ if (binfo.time < 0 || binfo.time > 3000) {
++ printk("%s sets an invalid time of blink\n", current->comm);
++ return len;
++ }
++
++ bpm_event_notify(IPM_EVENT_BLINK, IPM_EVENT_BLINK_SPEEDUP, &binfo,
++ sizeof(struct dvfm_blink_info));
++
++ return len;
++}
++SYSDEV_ATTR(blink, 0666, blink_show, blink_store);
++
++static ssize_t log_show(struct sys_device *sys_dev, char *buf)
++{
++ char *s = buf;
++
++ s += sprintf(s, "%d\n", g_bpm_log_level);
++
++ return (s - buf);
++}
++
++static ssize_t log_store(struct sys_device *sys_dev, const char *buf, size_t n)
++{
++ sscanf(buf, "%u", &g_bpm_log_level);
++
++ if (g_bpm_log_level < 0 || g_bpm_log_level > 7) {
++ g_bpm_log_level = 0;
++ printk(KERN_ERR "invalid command\n");
++ }
++ return n;
++}
++
++SYSDEV_ATTR(log, 0644, log_show, log_store);
++
++static ssize_t cons_show(struct sys_device *sys_dev, char *buf)
++{
++ char *s = buf;
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++ int i;
++ unsigned long avg_ms;
++
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ s += sprintf(s, "op %d: %d\n", i, g_dyn_ops[i].count);
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ if (p->tm)
++ avg_ms = p->ms / p->tm;
++ else
++ avg_ms = 0;
++ s += sprintf(s, "\t%8ld %12ld %8ld %s: %d\n",
++ p->tm, p->ms, avg_ms, p->sid, p->count);
++ }
++ }
++
++ return (s - buf);
++}
++
++static ssize_t cons_store(struct sys_device *sys_dev, const char *buf, size_t n)
++{
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++ int i;
++ int cons_ctl = 0;
++
++ sscanf(buf, "%u", &cons_ctl);
++
++ if (1 == cons_ctl) {
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ p->tm = 0;
++ p->ms = 0;
++ p->tmp_ms = 0;
++ }
++ }
++ }
++
++ return n;
++}
++
++SYSDEV_ATTR(cons, 0644, cons_show, cons_store);
++
++/*
++ * Dump the devices blocking each OP,
++ * and dump the list of tracked devices.
++ */
++static ssize_t trace_show(struct sys_device *sys_dev, char *buf)
++{
++ struct op_info *op_entry = NULL;
++ struct dvfm_trace_info *entry = NULL;
++ int len = 0, i;
++ unsigned int blocked_dev;
++
++ for (i = 0; i < op_nums; i++) {
++ blocked_dev = 0;
++ read_lock(&dvfm_op_list->lock);
++ /* op list shouldn't be empty because op_nums is valid */
++ list_for_each_entry(op_entry, &dvfm_op_list->list, list) {
++ if (op_entry->index == i)
++ blocked_dev = op_entry->device;
++ }
++ read_unlock(&dvfm_op_list->lock);
++ if (!blocked_dev)
++ continue;
++
++ len += sprintf(buf + len, "Blocked devices on OP%d:", i);
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ if (test_bit(entry->index, (void *)&blocked_dev))
++ len += sprintf(buf + len, "%s, ", entry->name);
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ len += sprintf(buf + len, "\n");
++ }
++ if (len == 0)
++		len += sprintf(buf + len, "No device blocks any OP\n");
++ len += sprintf(buf + len, "Trace device list:\n");
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ len += sprintf(buf + len, "%s, ", entry->name);
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ len += sprintf(buf + len, "\n");
++ return len;
++}
++SYSDEV_ATTR(trace, 0444, trace_show, NULL);
++
++static struct attribute *bpm_attr[] = {
++ &attr_bpm.attr,
++ &attr_profiler_window.attr,
++ &attr_op.attr,
++ &attr_ops.attr,
++ &attr_enable_op.attr,
++ &attr_log.attr,
++ &attr_cons.attr,
++ &attr_blink.attr,
++ &attr_trace.attr,
++};
++
++static int bpm_add(struct sys_device *sys_dev)
++{
++ int i, n, ret;
++ n = ARRAY_SIZE(bpm_attr);
++ for (i = 0; i < n; ++i) {
++ ret = sysfs_create_file(&(sys_dev->kobj), bpm_attr[i]);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++
++static int bpm_rm(struct sys_device *sys_dev)
++{
++ int i, n;
++ n = ARRAY_SIZE(bpm_attr);
++ for (i = 0; i < n; i++) {
++ sysfs_remove_file(&(sys_dev->kobj), bpm_attr[i]);
++ }
++ return 0;
++}
++
++static struct sysdev_driver bpm_driver = {
++ .add = bpm_add,
++ .remove = bpm_rm,
++};
++
++#ifdef CONFIG_TEST_BPMD
++#include "test_bpm.c"
++#endif
++/*****************************************************************************/
++/* */
++/* BPMD Init & Fini */
++/* */
++/*****************************************************************************/
++
++static int __init bpm_init(void)
++{
++ unsigned int ret = 0;
++ unsigned long flag;
++
++ bpmq_init();
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ build_dyn_ops();
++ build_active_ops();
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ g_bpm_enabled = 0;
++ init_waitqueue_head(&g_bpm_enabled_waitq);
++
++ ret = sysdev_driver_register(&cpu_sysdev_class, &bpm_driver);
++ if (ret) {
++ printk(KERN_ERR "Can't register bpm sys driver,err:%d\n", ret);
++ PM_BUG_ON(1);
++ }
++
++#ifdef CONFIG_TEST_BPMD
++ ret = sysdev_driver_register(&cpu_sysdev_class, &bpm_test_driver);
++ if (ret) {
++ printk(KERN_ERR "Can't register bpm test driver,err:%d\n", ret);
++ PM_BUG_ON(1);
++ }
++#endif
++
++ dvfm_register("user-echo", &dvfm_dev_idx);
++
++#ifdef CONFIG_ANDROID_POWER
++ android_register_early_suspend(&bpm_early_suspend);
++#endif
++ init_timer(&g_dvfm_blink_timer);
++ g_dvfm_blink_timer.function = dvfm_blink_timer_handler;
++ g_dvfm_blink_timer.data = (unsigned long)NULL;
++
++ g_bpm_thread_exit = 0;
++ init_completion(&g_bpm_thread_over);
++ ret = kernel_thread(bpm_thread, NULL, 0);
++
++ printk(KERN_NOTICE "bpm init finished (%d)\n", ret);
++ return 0;
++}
++
++static void __exit bpm_exit(void)
++{
++
++ g_bpm_thread_exit = 1;
++
++#ifdef CONFIG_ANDROID_POWER
++ android_unregister_early_suspend(&bpm_early_suspend);
++#endif
++ dvfm_unregister("user-echo", &dvfm_dev_idx);
++
++ g_bpm_enabled = 1;
++ wake_up_interruptible(&g_bpm_enabled_waitq);
++ wake_up_interruptible(&g_bpm_event_queue.waitq);
++ wait_for_completion(&g_bpm_thread_over);
++ g_bpm_enabled = 0;
++}
++
++module_init(bpm_init);
++module_exit(bpm_exit);
++
++MODULE_DESCRIPTION("BPMD");
++MODULE_LICENSE("GPL");
+diff -ur linux-2.6.32/arch/arm/mach-pxa/bpm_prof.c kernel/arch/arm/mach-pxa/bpm_prof.c
+--- linux-2.6.32/arch/arm/mach-pxa/bpm_prof.c 2009-12-13 12:58:12.232379200 +0200
++++ kernel/arch/arm/mach-pxa/bpm_prof.c 2009-12-12 16:09:26.429614458 +0200
+@@ -0,0 +1,564 @@
++/*
++ * PXA3xx IPM Profiler
++ *
++ * Copyright (C) 2008 Borqs Ltd.
++ * Emichael Li <emichael.li@borqs.com>
++ *
++ * Based on Marvell v6.5 release.
++ *
++ * Copyright (C) 2008 Marvell Corporation
++ * Haojian Zhuang <haojian.zhuang@marvell.com>
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++
++ * (C) Copyright 2008 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/tick.h>
++#include <linux/timer.h>
++#include <linux/device.h>
++#include <linux/jiffies.h>
++#include <mach/hardware.h>
++#include <mach/mspm_prof.h>
++#include <asm/arch/ipmc.h>
++#ifdef CONFIG_PXA3xx_DVFM
++#include <asm/arch/dvfm.h>
++#include <asm/arch/pxa3xx_dvfm.h>
++#endif
++
++extern int (*pipm_start_pmu)(struct ipm_profiler_arg *arg);
++extern int (*pipm_stop_pmu)(void);
++
++/* IDLE profiler tune OP with MIPS feature */
++#define MSPM_IDLE_PROF_MIPS 0
++
++#undef MAX_OP_NUM
++#define MAX_OP_NUM 10
++
++struct mspm_op_stats {
++ int op;
++ int idle;
++ unsigned int timestamp;
++ unsigned int jiffies;
++};
++
++struct mspm_mips {
++ int mips;
++ int h_thres; /* high threshold */
++ int l_thres; /* low threshold */
++};
++
++/* Store the time spent in run_op_time[] & idle_op_time[] */
++static int run_op_time[MAX_OP_NUM], idle_op_time[MAX_OP_NUM];
++
++/*
++ * Store OP's MIPS in op_mips[].
++ * The lowest frequency OP is the first entry.
++ */
++static struct mspm_mips op_mips[MAX_OP_NUM];
++
++/* Store the calculated MIPS of last sample window */
++static int last_mips;
++
++/*
++ * Store the first timestamp of sample window in first_stats
++ * Store the current timestamp of sample window in cur_stats
++ */
++static struct mspm_op_stats first_stats, cur_stats;
++
++/* OP numbers used in IPM IDLE Profiler */
++static int mspm_op_num = 0;
++
++static struct timer_list idle_prof_timer;
++
++/* PMU result is stored in it */
++static struct pmu_results sum_pmu_res;
++
++static int mspm_prof_enabled = 0;
++static int window_jif = 0;
++static int mspm_pmu_id;
++
++unsigned int prof_idle_time, prof_time;
++
++static int mspm_prof_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data);
++static struct notifier_block notifier_freq_block = {
++ .notifier_call = mspm_prof_notifier_freq,
++};
++
++static unsigned int read_time(void)
++{
++#ifdef CONFIG_PXA_32KTIMER
++ return OSCR4;
++#else
++ return OSCR0;
++#endif
++}
++
++
++static int bpm_mod_timer(struct timer_list *timer, unsigned long expires)
++{
++#ifdef CONFIG_BPMD
++ extern void timer_set_deferrable(struct timer_list *timer);
++ extern void timer_clr_deferrable(struct timer_list *timer);
++ extern int get_op_power_bonus(void);
++
++ if (get_op_power_bonus())
++ timer_set_deferrable(timer);
++ else
++ timer_clr_deferrable(timer);
++#endif
++ mod_timer(timer, expires);
++
++ return 0;
++}
++
++/*
++ * Record the OP index and RUN/IDLE state.
++ */
++int mspm_add_event(int op, int cpu_idle)
++{
++ unsigned int time;
++
++ if (mspm_prof_enabled) {
++ time = read_time();
++ /* sum the current sample window */
++ if (cpu_idle == CPU_STATE_IDLE)
++ idle_op_time[cur_stats.op] +=
++ time - cur_stats.timestamp;
++ else if (cpu_idle == CPU_STATE_RUN)
++ run_op_time[cur_stats.op] +=
++ time - cur_stats.timestamp;
++ /* update start point of current sample window */
++ cur_stats.op = op;
++ cur_stats.idle = cpu_idle;
++ cur_stats.timestamp = time;
++ cur_stats.jiffies = jiffies;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(mspm_add_event);
++
++/*
++ * Prepare to do a new sample.
++ * Clear the index in mspm_op_stats table.
++ */
++static int mspm_do_new_sample(void)
++{
++ /* clear previous sample window */
++ memset(&run_op_time, 0, sizeof(int) * MAX_OP_NUM);
++ memset(&idle_op_time, 0, sizeof(int) * MAX_OP_NUM);
++ /* prepare for the new sample window */
++ first_stats.op = cur_stats.op;
++ first_stats.idle = cur_stats.idle;
++ first_stats.timestamp = read_time();
++ first_stats.jiffies = jiffies;
++
++ prof_idle_time = 0;
++ prof_time = read_time();
++ return 0;
++}
++
++/*
++ * Initialize the MIPS value for every OP
++ */
++static int mspm_init_mips(void)
++{
++ struct op_info *info = NULL;
++ struct dvfm_md_opt *md_op = NULL;
++ int i, ret;
++ memset(&op_mips, 0, MAX_OP_NUM * sizeof(struct mspm_mips));
++ mspm_op_num = dvfm_op_count();
++#ifdef CONFIG_PXA3xx_DVFM
++ for (i = 0; i < mspm_op_num; i++) {
++ ret = dvfm_get_opinfo(i, &info);
++ if (ret)
++ continue;
++ md_op = (struct dvfm_md_opt *)info->op;
++ op_mips[i].mips = md_op->core;
++ if (op_mips[i].mips) {
++ op_mips[i].h_thres = DEF_HIGH_THRESHOLD;
++ if (!strcmp(md_op->name, "D0CS"))
++ op_mips[i].h_thres = 95;
++ } else {
++ mspm_op_num = i;
++ break;
++ }
++ }
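++	/* Each OP's low threshold is the previous (slower) OP's high threshold scaled by the MIPS ratio */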
++ for (i = 0; i < mspm_op_num - 1; i++)
++ op_mips[i + 1].l_thres = op_mips[i].h_thres * op_mips[i].mips
++ / op_mips[i + 1].mips;
++#endif
++ return 0;
++}
++
++/*
++ * Calculate the MIPS in sample window
++ */
++static int mspm_calc_mips(void)
++{
++ int i;
++ unsigned int sum_time = 0, sum = 0;
++
++	/* Calculate total time spent in the sample window */
++ for (i = 0; i < mspm_op_num; i++) {
++ sum_time += run_op_time[i] + idle_op_time[i];
++ sum += run_op_time[i] * op_mips[i].mips;
++ }
++ if (sum_time == 0)
++ return 0;
++
++ /*
++	 * Calculate the average MIPS over the sample window
++	 * Formula: sum over all i of run_op_time[i] * op_mips[i].mips / sum_time
++ */
++ return (sum / sum_time);
++}
++
++static int is_valid_sample_window(void)
++{
++ unsigned int time;
++ /* The sample window isn't started */
++ if (!mspm_prof_enabled)
++ goto out;
++ time = cur_stats.jiffies - first_stats.jiffies;
++ time = jiffies_to_msecs(time);
++ if (time >= MIN_SAMPLE_WINDOW)
++ return 1;
++out:
++ return 0;
++}
++
++/*
++ * When DVFM releases an OP, it invokes this function to get the preferred OP.
++ */
++static int mspm_get_mips(void)
++{
++ int ret;
++ extern int cur_op;
++
++ mspm_add_event(cur_op, CPU_STATE_RUN);
++
++ if (!is_valid_sample_window()) {
++		/* This sample window is invalid; use the MIPS value of the
++		 * last sample window
++ */
++ ret = last_mips;
++ goto out_sample;
++ }
++ ret = mspm_calc_mips();
++ if (ret < 0)
++ goto out_calc;
++ return ret;
++out_calc:
++ printk(KERN_WARNING "Can't calculate MIPS\n");
++out_sample:
++ return ret;
++}
++
++/*
++ * Adjust to the most appropriate OP according to MIPS result of
++ * sample window
++ */
++#if MSPM_IDLE_PROF_MIPS
++int mspm_tune(void)
++{
++ int i, mips;
++ if (mspm_prof_enabled) {
++ for (i = mspm_op_num - 1; i >= 0; i--) {
++ mips = mspm_get_mips();
++ if (mips >= (op_mips[i].l_thres *
++ op_mips[i].mips / 100))
++ break;
++ }
++ dvfm_request_op(i);
++ }
++ return 0;
++}
++#else
++int mspm_tune(void) { return 0; }
++#endif
++EXPORT_SYMBOL(mspm_tune);
++
++/***************************************************************************
++ * Idle Profiler
++ ***************************************************************************
++ */
++
++static struct ipm_profiler_arg pmu_arg;
++static int mspm_start_prof(struct ipm_profiler_arg *arg)
++{
++ struct pmu_results res;
++ struct op_info *info = NULL;
++
++ memset(&sum_pmu_res, 0, sizeof(struct pmu_results));
++
++	/* pmu_arg.window_size stores the number of milliseconds.
++ * window_jif stores the number of jiffies.
++ */
++ memset(&pmu_arg, 0, sizeof(struct ipm_profiler_arg));
++ pmu_arg.flags = arg->flags;
++ if (arg->window_size > 0)
++ pmu_arg.window_size = arg->window_size;
++ else
++ pmu_arg.window_size = DEF_SAMPLE_WINDOW;
++ window_jif = msecs_to_jiffies(pmu_arg.window_size);
++ if ((mspm_pmu_id > 0) && (pmu_arg.flags & IPM_PMU_PROFILER)) {
++ pmu_arg.pmn0 = arg->pmn0;
++ pmu_arg.pmn1 = arg->pmn1;
++ pmu_arg.pmn2 = arg->pmn2;
++ pmu_arg.pmn3 = arg->pmn3;
++ /* Collect PMU information */
++ if (pmu_stop(&res))
++ printk(KERN_WARNING
++ "L:%d: pmu_stop failed!\n", __LINE__);
++ if (pmu_start(pmu_arg.pmn0, pmu_arg.pmn1, pmu_arg.pmn2,
++ pmu_arg.pmn3))
++ printk(KERN_WARNING
++ "L:%d: pmu_start failed!\n", __LINE__);
++ }
++ /* start next sample window */
++ cur_stats.op = dvfm_get_op(&info);
++ cur_stats.idle = CPU_STATE_RUN;
++ cur_stats.timestamp = read_time();
++ cur_stats.jiffies = jiffies;
++ mspm_do_new_sample();
++ bpm_mod_timer(&idle_prof_timer, jiffies + window_jif);
++ mspm_prof_enabled = 1;
++ return 0;
++}
++
++static int mspm_stop_prof(void)
++{
++ struct pmu_results res;
++ if ((mspm_pmu_id > 0) && (pmu_arg.flags & IPM_PMU_PROFILER)) {
++ if (pmu_stop(&res))
++ printk(KERN_WARNING
++ "L:%d: pmu_stop failed!\n", __LINE__);
++ }
++ del_timer(&idle_prof_timer);
++ mspm_prof_enabled = 0;
++ return 0;
++}
++
++static int calc_pmu_res(struct pmu_results *res)
++{
++ if (res == NULL)
++ return -EINVAL;
++ sum_pmu_res.ccnt += res->ccnt;
++ sum_pmu_res.pmn0 += res->pmn0;
++ sum_pmu_res.pmn1 += res->pmn1;
++ sum_pmu_res.pmn2 += res->pmn2;
++ sum_pmu_res.pmn3 += res->pmn3;
++ return 0;
++}
++
++/*
++ * Pause the idle profiler when the system enters low-power mode.
++ * Resume it when the system exits low-power mode.
++ */
++void set_idletimer(int enable)
++{
++ struct pmu_results res;
++ if (enable && mspm_prof_enabled) {
++ /*
++ * Restart the idle profiler because it's only disabled
++ * before entering low power mode.
++		 * If we simply continued the sample window with the remaining
++		 * jiffies, too many OS timer wakeups would occur in the system.
++ * Just restart the sample window.
++ */
++ bpm_mod_timer(&idle_prof_timer, jiffies + window_jif);
++ tick_nohz_restart_sched_tick();
++
++ first_stats.jiffies = jiffies;
++ first_stats.timestamp = read_time();
++
++ if (pmu_arg.flags & IPM_PMU_PROFILER) {
++ if (pmu_start(pmu_arg.pmn0, pmu_arg.pmn1, pmu_arg.pmn2,
++ pmu_arg.pmn3)) {
++ printk(KERN_WARNING
++ "L:%d: pmu_start failed!\n", __LINE__);
++ }
++ }
++ } else if (!enable && mspm_prof_enabled) {
++ del_timer(&idle_prof_timer);
++ tick_nohz_stop_sched_tick(1);
++
++ if (pmu_arg.flags & IPM_PMU_PROFILER) {
++ if (pmu_stop(&res)) {
++ printk(KERN_WARNING
++ "L:%d: pmu_stop failed!\n", __LINE__);
++ } else
++ calc_pmu_res(&res);
++ }
++ }
++}
++EXPORT_SYMBOL(set_idletimer);
++
++/*
++ * Handler of IDLE PROFILER
++ */
++static void idle_prof_handler(unsigned long data)
++{
++ struct ipm_profiler_result out_res;
++ struct pmu_results res;
++ struct op_info *info = NULL;
++ int ret, mips, op;
++
++ if (!mspm_prof_enabled)
++ return;
++
++ ret = mspm_get_mips();
++ if (ret >= 0)
++ mips = ret;
++ else
++ mips = last_mips;
++ if ((mspm_pmu_id > 0) && (pmu_arg.flags & IPM_PMU_PROFILER)) {
++ if (pmu_stop(&res))
++ printk(KERN_WARNING "pmu_stop failed %d\n", __LINE__);
++ else
++ calc_pmu_res(&res);
++ if (pmu_start(pmu_arg.pmn0, pmu_arg.pmn1, pmu_arg.pmn2,
++ pmu_arg.pmn3))
++ printk(KERN_WARNING "pmu_start failed %d\n", __LINE__);
++ memset(&out_res, 0, sizeof(struct ipm_profiler_result));
++ out_res.pmu.ccnt = sum_pmu_res.ccnt;
++ out_res.pmu.pmn0 = sum_pmu_res.pmn0;
++ out_res.pmu.pmn1 = sum_pmu_res.pmn1;
++ out_res.pmu.pmn2 = sum_pmu_res.pmn2;
++ out_res.pmu.pmn3 = sum_pmu_res.pmn3;
++ }
++ op = dvfm_get_op(&info);
++
++#if 0
++	/* When the system is running, the MIPS of the current OP won't be zero. */
++ out_res.busy_ratio = mips * 100 / op_mips[op].mips;
++ out_res.window_size = jiffies_to_msecs(window_jif);
++#endif
++
++ prof_time = read_time() - prof_time;
++
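++	/* Busy ratio: percentage of the profiling window not spent idle */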
++ out_res.busy_ratio = 100 - 100 * prof_idle_time / prof_time;
++ out_res.window_size = 0; /* not used */
++ out_res.mips = mips;
++
++ /* send PMU result to policy maker in user space */
++ bpm_event_notify(IPM_EVENT_PROFILER, pmu_arg.flags, &out_res,
++ sizeof(struct ipm_profiler_result));
++
++#if 0
++ /* start next sample window */
++ mspm_do_new_sample();
++ bpm_mod_timer(&idle_prof_timer, jiffies + window_jif);
++ memset(&sum_pmu_res, 0, sizeof(struct pmu_results));
++#endif
++ last_mips = mips;
++}
++
++/*
++ * Pause the idle profiler when the system enters low-power mode.
++ * Resume it when the system exits low-power mode.
++ */
++static int mspm_prof_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct dvfm_freqs *freqs = (struct dvfm_freqs *)data;
++ struct op_info *info = &(freqs->new_info);
++ struct dvfm_md_opt *md = NULL;
++ struct pmu_results res;
++
++ if (!mspm_prof_enabled)
++ return 0;
++ md = (struct dvfm_md_opt *)(info->op);
++ if (md->power_mode == POWER_MODE_D1 ||
++ md->power_mode == POWER_MODE_D2 ||
++ md->power_mode == POWER_MODE_CG) {
++ switch (val) {
++ case DVFM_FREQ_PRECHANGE:
++ del_timer(&idle_prof_timer);
++ tick_nohz_stop_sched_tick(1);
++ if (pmu_arg.flags & IPM_PMU_PROFILER) {
++ if (pmu_stop(&res))
++ printk(KERN_WARNING
++ "L:%d: pmu_stop failed!\n",
++ __LINE__);
++ else
++ calc_pmu_res(&res);
++ }
++ break;
++ case DVFM_FREQ_POSTCHANGE:
++ /* Update jiffies and touch watchdog process */
++ tick_nohz_update_jiffies();
++ /*
++ * Restart the idle profiler because it's only
++ * disabled before entering low power mode.
++			 * If we simply continued the sample window with
++			 * the remaining jiffies, too many OS timer wakeups
++			 * would occur in the system.
++ * Just restart the sample window.
++ */
++ bpm_mod_timer(&idle_prof_timer, jiffies + window_jif);
++ first_stats.jiffies = jiffies;
++ first_stats.timestamp = read_time();
++
++ if (pmu_arg.flags & IPM_PMU_PROFILER)
++ if (pmu_start(pmu_arg.pmn0, pmu_arg.pmn1,
++ pmu_arg.pmn2, pmu_arg.pmn3))
++ printk(KERN_WARNING
++ "L:%d: pmu_start failed!\n",
++ __LINE__);
++ break;
++ }
++ }
++ return 0;
++}
++
++int __init mspm_prof_init(void)
++{
++ mspm_pmu_id = pmu_claim();
++
++ memset(&pmu_arg, 0, sizeof(struct ipm_profiler_arg));
++ pmu_arg.window_size = DEF_SAMPLE_WINDOW;
++ pmu_arg.pmn0 = PMU_EVENT_POWER_SAVING;
++ pmu_arg.pmn1 = PMU_EVENT_POWER_SAVING;
++ pmu_arg.pmn2 = PMU_EVENT_POWER_SAVING;
++ pmu_arg.pmn3 = PMU_EVENT_POWER_SAVING;
++ window_jif = msecs_to_jiffies(pmu_arg.window_size);
++
++ pipm_start_pmu = mspm_start_prof;
++ pipm_stop_pmu = mspm_stop_prof;
++
++	/* This timer triggers the sample window.
++	 * If the system is idle, the timer can be deferred.
++ */
++ init_timer(&idle_prof_timer);
++ idle_prof_timer.function = idle_prof_handler;
++ idle_prof_timer.data = 0;
++
++ mspm_init_mips();
++
++ dvfm_register_notifier(&notifier_freq_block,
++ DVFM_FREQUENCY_NOTIFIER);
++
++ return 0;
++}
++
++void __exit mspm_prof_exit(void)
++{
++ dvfm_unregister_notifier(&notifier_freq_block,
++ DVFM_FREQUENCY_NOTIFIER);
++
++ if (mspm_pmu_id)
++ pmu_release(mspm_pmu_id);
++
++ pipm_start_pmu = NULL;
++ pipm_stop_pmu = NULL;
++}
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/devices.c kernel/arch/arm/mach-pxa/devices.c
+--- linux-2.6.32/arch/arm/mach-pxa/devices.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/devices.c 2009-12-12 16:09:26.436277478 +0200
+@@ -15,6 +15,7 @@
+ #include <mach/camera.h>
+ #include <mach/audio.h>
+ #include <mach/pxa3xx_nand.h>
++#include <mach/pxa3xx_dvfm.h>
+
+ #include "devices.h"
+ #include "generic.h"
+@@ -962,6 +963,76 @@
+ },
+ };
+
++static struct resource pxa3xx_resource_freq[] = {
++ [0] = {
++ .name = "clkmgr_regs",
++ .start = 0x41340000,
++ .end = 0x41350003,
++ .flags = IORESOURCE_MEM,
++ },
++ [1] = {
++ .name = "spmu_regs",
++ .start = 0x40f50000,
++ .end = 0x40f50103,
++ .flags = IORESOURCE_MEM,
++ },
++ [2] = {
++ .name = "bpmu_regs",
++ .start = 0x40f40000,
++ .end = 0x40f4003b,
++ .flags = IORESOURCE_MEM,
++ },
++ [3] = {
++ .name = "dmc_regs",
++ .start = 0x48100000,
++ .end = 0x4810012f,
++ .flags = IORESOURCE_MEM,
++ },
++ [4] = {
++ .name = "smc_regs",
++ .start = 0x4a000000,
++ .end = 0x4a00008f,
++ .flags = IORESOURCE_MEM,
++ }
++};
++
++struct platform_device pxa3xx_device_freq = {
++ .name = "pxa3xx-freq",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(pxa3xx_resource_freq),
++ .resource = pxa3xx_resource_freq,
++};
++
++void __init set_pxa3xx_freq_info(struct pxa3xx_freq_mach_info *info)
++{
++ pxa_register_device(&pxa3xx_device_freq, info);
++}
++
++void __init set_pxa3xx_freq_parent(struct device *parent_dev)
++{
++ pxa3xx_device_freq.dev.parent = parent_dev;
++}
++
++static struct resource pxa3xx_pmu_resources[] = {
++ [0] = {
++ .name = "pmu_regs",
++ .start = 0x4600ff00,
++ .end = 0x4600ffff,
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++struct platform_device pxa3xx_device_pmu = {
++ .name = "pxa3xx-pmu",
++ .id = 0,
++ .resource = pxa3xx_pmu_resources,
++ .num_resources = ARRAY_SIZE(pxa3xx_pmu_resources),
++};
++
++void __init pxa3xx_set_pmu_info(void *info)
++{
++ pxa_register_device(&pxa3xx_device_pmu, info);
++}
+ #endif /* CONFIG_PXA3xx */
+
+ /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
+diff -ur linux-2.6.32/arch/arm/mach-pxa/devices.h kernel/arch/arm/mach-pxa/devices.h
+--- linux-2.6.32/arch/arm/mach-pxa/devices.h 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/devices.h 2009-12-12 16:09:26.436277478 +0200
+@@ -36,5 +36,6 @@
+ extern struct platform_device pxa3xx_device_i2c_power;
+
+ extern struct platform_device pxa3xx_device_gcu;
++extern struct platform_device pxa3xx_device_freq;
+
+ void __init pxa_register_device(struct platform_device *dev, void *data);
+diff -ur linux-2.6.32/arch/arm/mach-pxa/dvfm.c kernel/arch/arm/mach-pxa/dvfm.c
+--- linux-2.6.32/arch/arm/mach-pxa/dvfm.c 2009-12-13 12:58:54.725287534 +0200
++++ kernel/arch/arm/mach-pxa/dvfm.c 2009-12-12 16:09:26.439612372 +0200
+@@ -0,0 +1,922 @@
++/*
++ * DVFM Abstract Layer
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++
++ * (C) Copyright 2007 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/sysdev.h>
++#include <linux/spinlock.h>
++#include <linux/notifier.h>
++#include <linux/string.h>
++#include <linux/kobject.h>
++#include <linux/list.h>
++#include <linux/notifier.h>
++#include <asm/atomic.h>
++#include <mach/dvfm.h>
++
++#ifdef CONFIG_BPMD
++#include <mach/bpm.h>
++
++extern int bpm_enable_op(int index, int dev_idx);
++extern int bpm_disable_op(int index, int dev_idx);
++extern int bpm_enable_op_name(char *name, int dev_idx, char *sid);
++extern int bpm_disable_op_name(char *name, int dev_idx, char *sid);
++#endif
++
++#define MAX_DEVNAME_LEN 32
++/* This structure is used to dump device name list */
++struct name_list {
++ int id;
++ char name[MAX_DEVNAME_LEN];
++};
++
++static ATOMIC_NOTIFIER_HEAD(dvfm_freq_notifier_list);
++
++/* This list links the log of DVFM operations */
++struct info_head dvfm_trace_list = {
++ .list = LIST_HEAD_INIT(dvfm_trace_list.list),
++ .lock = RW_LOCK_UNLOCKED,
++ .device = 0,
++};
++
++#ifndef CONFIG_BPMD
++/* This idx is used for user debug */
++static int dvfm_dev_idx;
++#endif
++
++struct dvfm_driver *dvfm_driver = NULL;
++struct info_head *dvfm_op_list = NULL;
++
++unsigned int cur_op; /* current operating point */
++unsigned int def_op; /* default operating point */
++unsigned int op_nums = 0; /* number of operating point */
++
++static atomic_t lp_count = ATOMIC_INIT(0); /* number of blockers of low-power mode */
++
++extern struct sysdev_class cpu_sysdev_class;
++
++int dvfm_find_op(int index, struct op_info **op)
++{
++ struct op_info *p = NULL;
++
++ read_lock(&dvfm_op_list->lock);
++ if (list_empty(&dvfm_op_list->list)) {
++ read_unlock(&dvfm_op_list->lock);
++ return -ENOENT;
++ }
++ list_for_each_entry(p, &dvfm_op_list->list, list) {
++ if (p->index == index) {
++ *op = p;
++ read_unlock(&dvfm_op_list->lock);
++ return 0;
++ }
++ }
++ read_unlock(&dvfm_op_list->lock);
++ return -ENOENT;
++}
++
++#ifndef CONFIG_BPMD
++/* Display current operating point */
++static ssize_t op_show(struct sys_device *sys_dev, struct sysdev_attribute *attr,char *buf)
++{
++ struct op_info *op = NULL;
++ int len = 0;
++
++ if (dvfm_driver->dump) {
++ if (!dvfm_find_op(cur_op, &op)) {
++ len = dvfm_driver->dump(dvfm_driver->priv, op, buf);
++ }
++ }
++
++ return len;
++}
++
++/* Set current operating point */
++static ssize_t op_store(struct sys_device *sys_dev, struct sysdev_attribute *attr, const char *buf,
++ size_t len)
++{
++ struct dvfm_freqs freqs;
++ int new_op;
++
++ sscanf(buf, "%u", &new_op);
++ dvfm_request_op(new_op);
++ return len;
++}
++SYSDEV_ATTR(op, 0644, op_show, op_store);
++
++/* Dump all operating points */
++static ssize_t ops_show(struct sys_device *sys_dev, struct sysdev_attribute *attr, char *buf)
++{
++ struct op_info *entry = NULL;
++ int len = 0;
++ char *p = NULL;
++
++ if (!dvfm_driver->dump)
++ return 0;
++ read_lock(&dvfm_op_list->lock);
++ if (!list_empty(&dvfm_op_list->list)) {
++ list_for_each_entry(entry, &dvfm_op_list->list, list) {
++ p = buf + len;
++ len += dvfm_driver->dump(dvfm_driver->priv, entry, p);
++ }
++ }
++ read_unlock(&dvfm_op_list->lock);
++
++ return len;
++}
++SYSDEV_ATTR(ops, 0444, ops_show, NULL);
++
++/* Dump all enabled operating points */
++static ssize_t enable_op_show(struct sys_device *sys_dev, struct sysdev_attribute *attr, char *buf)
++{
++ struct op_info *entry = NULL;
++ int len = 0;
++ char *p = NULL;
++
++ if (!dvfm_driver->dump)
++ return 0;
++ read_lock(&dvfm_op_list->lock);
++ if (!list_empty(&dvfm_op_list->list)) {
++ list_for_each_entry(entry, &dvfm_op_list->list, list) {
++ if (!entry->device) {
++ p = buf + len;
++ len += dvfm_driver->dump(dvfm_driver->priv, entry, p);
++ }
++ }
++ }
++ read_unlock(&dvfm_op_list->lock);
++
++ return len;
++}
++
++static ssize_t enable_op_store(struct sys_device *sys_dev, struct sysdev_attribute *attr, const char *buf,
++ size_t len)
++{
++ int op, level;
++
++ sscanf(buf, "%u,%u", &op, &level);
++ if (level) {
++ dvfm_enable_op(op, dvfm_dev_idx);
++ } else
++ dvfm_disable_op(op, dvfm_dev_idx);
++ return len;
++}
++SYSDEV_ATTR(enable_op, 0644, enable_op_show, enable_op_store);
++
++/*
++ * Dump the devices blocking each OP,
++ * and dump the list of tracked devices.
++ */
++static ssize_t trace_show(struct sys_device *sys_dev, struct sysdev_attribute *attr, char *buf)
++{
++ struct op_info *op_entry = NULL;
++ struct dvfm_trace_info *entry = NULL;
++ int len = 0, i;
++ unsigned int blocked_dev;
++
++ for (i = 0; i < op_nums; i++) {
++ blocked_dev = 0;
++ read_lock(&dvfm_op_list->lock);
++ /* op list shouldn't be empty because op_nums is valid */
++ list_for_each_entry(op_entry, &dvfm_op_list->list, list) {
++ if (op_entry->index == i)
++ blocked_dev = op_entry->device;
++ }
++ read_unlock(&dvfm_op_list->lock);
++ if (!blocked_dev)
++ continue;
++
++ len += sprintf(buf + len, "Blocked devices on OP%d:", i);
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ if (test_bit(entry->index, (void *)&blocked_dev))
++ len += sprintf(buf + len, "%s, ", entry->name);
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ len += sprintf(buf + len, "\n");
++ }
++ if (len == 0)
++		len += sprintf(buf + len, "No device blocks any OP\n");
++ len += sprintf(buf + len, "Trace device list:\n");
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ len += sprintf(buf + len, "%s, ", entry->name);
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ len += sprintf(buf + len, "\n");
++ return len;
++}
++SYSDEV_ATTR(trace, 0444, trace_show, NULL);
++
++#ifdef CONFIG_CPU_PXA310
++static ssize_t freq_show(struct sys_device *sys_dev, struct sysdev_attribute *attr, char *buf)
++{
++ struct op_info *op = NULL;
++ int len = 0;
++
++ if (dvfm_driver->freq_show) {
++ if (!dvfm_find_op(cur_op, &op)) {
++ len = dvfm_driver->freq_show(dvfm_driver->priv, op, buf);
++ }
++ }
++
++ return len;
++}
++/*
++ * We could define a freq_store to set frequencies with many parameters, but
++ * a new set of frequencies entered that way would only be treated as a
++ * non-standard OP, not a new OP, so the freq_store function isn't defined.
++ */
++SYSDEV_ATTR(frequency, 0644, freq_show, NULL);
++#endif
++
++static struct attribute *dvfm_attr[] = {
++ &attr_op.attr,
++ &attr_ops.attr,
++ &attr_enable_op.attr,
++ &attr_trace.attr,
++#ifdef CONFIG_CPU_PXA310
++ &attr_frequency.attr,
++#endif
++};
++#endif
++
++int dvfm_op_count(void)
++{
++ int ret = -EINVAL;
++
++ if (dvfm_driver && dvfm_driver->count)
++ ret = dvfm_driver->count(dvfm_driver->priv, dvfm_op_list);
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_op_count);
++
++int dvfm_get_op(struct op_info **p)
++{
++ if (dvfm_find_op(cur_op, p))
++ return -EINVAL;
++ return cur_op;
++}
++EXPORT_SYMBOL(dvfm_get_op);
++
++int dvfm_dump_op(int idx, char *buf)
++{
++ struct op_info *op = NULL;
++ int len = 0;
++
++ if (dvfm_driver && dvfm_driver->dump && !dvfm_find_op(idx, &op))
++ len = dvfm_driver->dump(dvfm_driver->priv, op, buf);
++
++ return len;
++}
++EXPORT_SYMBOL(dvfm_dump_op);
++
++int dvfm_get_op_freq(int idx, struct op_freq *pf)
++{
++ struct op_info *op = NULL;
++ int ret = 0;
++
++ if (dvfm_driver && dvfm_driver->get_freq && !dvfm_find_op(idx, &op))
++ ret = dvfm_driver->get_freq(dvfm_driver->priv, op, pf);
++
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_get_op_freq);
++
++int dvfm_check_active_op(int idx)
++{
++ struct op_info *op = NULL;
++ int ret = 0;
++
++ if (dvfm_driver && dvfm_driver->check_active_op && !dvfm_find_op(idx, &op))
++ ret = dvfm_driver->check_active_op(dvfm_driver->priv, op);
++
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_check_active_op);
++
++int dvfm_get_defop(void)
++{
++ return def_op;
++}
++EXPORT_SYMBOL(dvfm_get_defop);
++
++int dvfm_get_opinfo(int index, struct op_info **p)
++{
++ if (dvfm_find_op(index, p))
++ return -EINVAL;
++ return 0;
++}
++EXPORT_SYMBOL(dvfm_get_opinfo);
++
++
++const char* dvfm_get_op_name(int idx)
++{
++ struct op_info *op = NULL;
++
++ if (dvfm_driver && dvfm_driver->name && !dvfm_find_op(idx, &op))
++ return dvfm_driver->name(dvfm_driver->priv, op);
++
++ return NULL;
++}
++EXPORT_SYMBOL(dvfm_get_op_name);
++
++
++int dvfm_set_op(struct dvfm_freqs *freqs, unsigned int new,
++ unsigned int relation)
++{
++ int ret = -EINVAL;
++
++ /* check whether dvfm is enabled */
++ if (!dvfm_driver || !dvfm_driver->count)
++ return -EINVAL;
++ if (dvfm_driver->set)
++ ret = dvfm_driver->set(dvfm_driver->priv, freqs, new, relation);
++ return ret;
++}
++
++/* Request an operating point. The system may select a higher frequency
++ * because of device constraints.
++ */
++int dvfm_request_op(int index)
++{
++ int ret = -EFAULT;
++
++ /* check whether dvfm is enabled */
++ if (!dvfm_driver || !dvfm_driver->count)
++ return -EINVAL;
++#ifdef CONFIG_BPMD
++ printk(KERN_ERR "please don't use this API\n");
++ WARN_ON(1);
++#endif
++
++ if (dvfm_driver->request_set)
++ ret = dvfm_driver->request_set(dvfm_driver->priv, index);
++
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_request_op);
++
++/*
++ * Device remove the constraint on OP.
++ */
++int __dvfm_enable_op(int index, int dev_idx)
++{
++ struct op_info *p = NULL;
++ int num;
++
++ /* check whether dvfm is enabled */
++ if (!dvfm_driver || !dvfm_driver->count)
++ return -EINVAL;
++	/* only registered devices can invoke DVFM operations */
++ if ((dev_idx >= DVFM_MAX_DEVICE) || dev_idx < 0)
++ return -ENOENT;
++ num = dvfm_driver->count(dvfm_driver->priv, dvfm_op_list);
++ if (num <= index)
++ return -ENOENT;
++ if (!dvfm_find_op(index, &p)) {
++ write_lock(&dvfm_op_list->lock);
++ /* remove device ID */
++ clear_bit(dev_idx, (void *)&p->device);
++ write_unlock(&dvfm_op_list->lock);
++#ifndef CONFIG_BPMD
++ dvfm_driver->enable_op(dvfm_driver->priv, index, RELATION_LOW);
++#endif
++
++ }
++ return 0;
++}
++
++/*
++ * Device set constraint on OP
++ */
++int __dvfm_disable_op(int index, int dev_idx)
++{
++ struct op_info *p = NULL;
++ int num;
++
++ /* check whether dvfm is enabled */
++ if (!dvfm_driver || !dvfm_driver->count)
++ return -EINVAL;
++	/* only registered devices can invoke DVFM operations */
++ if ((dev_idx >= DVFM_MAX_DEVICE) || dev_idx < 0)
++ return -ENOENT;
++ num = dvfm_driver->count(dvfm_driver->priv, dvfm_op_list);
++ if (num <= index)
++ return -ENOENT;
++ if (!dvfm_find_op(index, &p)) {
++ write_lock(&dvfm_op_list->lock);
++ /* set device ID */
++ set_bit(dev_idx, (void *)&p->device);
++ write_unlock(&dvfm_op_list->lock);
++ dvfm_driver->disable_op(dvfm_driver->priv, index, RELATION_LOW);
++ }
++ return 0;
++}
++
++int __dvfm_disable_op2(int index, int dev_idx)
++{
++ struct op_info *p = NULL;
++ int num;
++
++ if (!dvfm_driver || !dvfm_driver->count) {
++ return -ENOENT;
++ }
++ num = dvfm_driver->count(dvfm_driver->priv, dvfm_op_list);
++ if (num <= index)
++ return -ENOENT;
++ if (!dvfm_find_op(index, &p)) {
++ write_lock(&dvfm_op_list->lock);
++ set_bit(dev_idx, (void *)&p->device);
++ write_unlock(&dvfm_op_list->lock);
++ }
++ return 0;
++}
++
++int dvfm_enable_op(int index, int dev_idx)
++{
++#ifdef CONFIG_BPMD
++ bpm_enable_op(index, dev_idx);
++#else
++ __dvfm_enable_op(index, dev_idx);
++#endif
++ return 0;
++}
++
++int dvfm_disable_op(int index, int dev_idx)
++{
++#ifdef CONFIG_BPMD
++ bpm_disable_op(index, dev_idx);
++#else
++ __dvfm_disable_op(index, dev_idx);
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(dvfm_enable_op);
++EXPORT_SYMBOL(dvfm_disable_op);
++
++int __dvfm_enable_op_name(char *name, int dev_idx)
++{
++ struct op_info *p = NULL;
++ int index;
++
++ if (!dvfm_driver || !dvfm_driver->name || !name)
++ return -EINVAL;
++	/* only registered devices can invoke DVFM operations */
++ if ((dev_idx >= DVFM_MAX_DEVICE) || dev_idx < 0)
++ return -ENOENT;
++ list_for_each_entry(p, &dvfm_op_list->list, list) {
++ if (!strcmp(dvfm_driver->name(dvfm_driver->priv, p), name)) {
++ index = p->index;
++ write_lock(&dvfm_op_list->lock);
++ clear_bit(dev_idx, (void *)&p->device);
++ write_unlock(&dvfm_op_list->lock);
++ dvfm_driver->enable_op(dvfm_driver->priv,
++ index, RELATION_LOW);
++ break;
++ }
++ }
++ return 0;
++}
++
++int __dvfm_disable_op_name(char *name, int dev_idx)
++{
++ struct op_info *p = NULL;
++ int index;
++
++ if (!dvfm_driver || !dvfm_driver->name || !name)
++ return -EINVAL;
++	/* only registered devices can invoke DVFM operations */
++ if ((dev_idx >= DVFM_MAX_DEVICE) || dev_idx < 0)
++ return -ENOENT;
++ list_for_each_entry(p, &dvfm_op_list->list, list) {
++ if (!strcmp(dvfm_driver->name(dvfm_driver->priv, p), name)) {
++ index = p->index;
++ write_lock(&dvfm_op_list->lock);
++ set_bit(dev_idx, (void *)&p->device);
++ write_unlock(&dvfm_op_list->lock);
++ dvfm_driver->disable_op(dvfm_driver->priv,
++ index, RELATION_LOW);
++ break;
++ }
++ }
++ return 0;
++}
++
++/*
++EXPORT_SYMBOL(dvfm_enable_op_name);
++EXPORT_SYMBOL(dvfm_disable_op_name);
++*/
++
++int _dvfm_enable_op_name(char *name, int dev_idx, char *sid)
++{
++ int ret;
++#ifdef CONFIG_BPMD
++ ret = bpm_enable_op_name(name, dev_idx, sid);
++#else
++ ret = __dvfm_enable_op_name(name, dev_idx);
++#endif
++ return ret;
++}
++
++int _dvfm_disable_op_name(char *name, int dev_idx, char *sid)
++{
++ int ret;
++#ifdef CONFIG_BPMD
++ ret = bpm_disable_op_name(name, dev_idx, sid);
++#else
++ ret = __dvfm_disable_op_name(name, dev_idx);
++#endif
++ return ret;
++}
++
++EXPORT_SYMBOL(_dvfm_enable_op_name);
++EXPORT_SYMBOL(_dvfm_disable_op_name);
++
++/* Only enable those safe operating point */
++int dvfm_enable(int dev_idx)
++{
++ printk(KERN_WARNING "dvfm_enable() is not preferred\n");
++ WARN_ON(1);
++ if (!dvfm_driver || !dvfm_driver->count || !dvfm_driver->enable_dvfm)
++ return -ENOENT;
++ return dvfm_driver->enable_dvfm(dvfm_driver->priv, dev_idx);
++}
++
++/* return whether the result is zero */
++int dvfm_disable(int dev_idx)
++{
++ printk(KERN_WARNING "dvfm_disable() is not preferred\n");
++ WARN_ON(1);
++ if (!dvfm_driver || !dvfm_driver->count || !dvfm_driver->disable_dvfm)
++ return -ENOENT;
++ return dvfm_driver->disable_dvfm(dvfm_driver->priv, dev_idx);
++}
++
++/* return whether the result is zero */
++int dvfm_enable_pm(void)
++{
++ return atomic_inc_and_test(&lp_count);
++}
++
++/* return whether the result is zero */
++int dvfm_disable_pm(void)
++{
++ return atomic_dec_and_test(&lp_count);
++}
++
++int dvfm_notifier_frequency(struct dvfm_freqs *freqs, unsigned int state)
++{
++ int ret;
++
++ switch (state) {
++ case DVFM_FREQ_PRECHANGE:
++ ret = atomic_notifier_call_chain(&dvfm_freq_notifier_list,
++ DVFM_FREQ_PRECHANGE, freqs);
++ if (ret != NOTIFY_DONE)
++ pr_debug("Failure in device driver before "
++ "switching frequency\n");
++ break;
++ case DVFM_FREQ_POSTCHANGE:
++ ret = atomic_notifier_call_chain(&dvfm_freq_notifier_list,
++ DVFM_FREQ_POSTCHANGE, freqs);
++ if (ret != NOTIFY_DONE)
++ pr_debug("Failure in device driver after "
++ "switching frequency\n");
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++int dvfm_register_notifier(struct notifier_block *nb, unsigned int list)
++{
++ int ret;
++
++ switch (list) {
++ case DVFM_FREQUENCY_NOTIFIER:
++ ret = atomic_notifier_chain_register(
++ &dvfm_freq_notifier_list, nb);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_register_notifier);
++
++int dvfm_unregister_notifier(struct notifier_block *nb, unsigned int list)
++{
++ int ret;
++
++ switch (list) {
++ case DVFM_FREQUENCY_NOTIFIER:
++ ret = atomic_notifier_chain_unregister(
++ &dvfm_freq_notifier_list, nb);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dvfm_unregister_notifier);
++
++/*
++ * add device into trace list
++ * return device index
++ */
++static int add_device(char *name)
++{
++ struct dvfm_trace_info *entry = NULL, *new = NULL;
++ int min;
++
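++	/* Allocate the lowest free device index from the 32-bit device bitmap */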
++ min = find_first_zero_bit(&dvfm_trace_list.device, DVFM_MAX_DEVICE);
++ if (min == DVFM_MAX_DEVICE)
++ return -EINVAL;
++
++	/* Allocate a new trace entry for this device */
++ new = kzalloc(sizeof(struct dvfm_trace_info), GFP_ATOMIC);
++ if (new == NULL)
++ goto out_mem;
++ /* add new item */
++ strcpy(new->name, name);
++ new->index = min;
++ /* insert the new item in increasing order */
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ if (entry->index > min) {
++ list_add_tail(&(new->list), &(entry->list));
++ goto inserted;
++ }
++ }
++ list_add_tail(&(new->list), &(dvfm_trace_list.list));
++inserted:
++ set_bit(min, (void *)&dvfm_trace_list.device);
++
++ return min;
++out_mem:
++ return -ENOMEM;
++}
++
++/*
++ * Query the number of devices registered with DVFM
++ */
++int dvfm_query_device_num(void)
++{
++ int count = 0;
++ struct dvfm_trace_info *entry = NULL;
++
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ count++;
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ return count;
++}
++EXPORT_SYMBOL(dvfm_query_device_num);
++
++/*
++ * Query all device names registered with DVFM
++ */
++int dvfm_query_device_list(void *mem, int len)
++{
++ int count = 0, size;
++ struct dvfm_trace_info *entry = NULL;
++ struct name_list *p = (struct name_list *)mem;
++
++ count = dvfm_query_device_num();
++ size = sizeof(struct name_list);
++ if (len < count * size)
++ return -ENOMEM;
++
++ read_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(entry, &dvfm_trace_list.list, list) {
++ p->id = entry->index;
++ strcpy(p->name, entry->name);
++ p++;
++ }
++ read_unlock(&dvfm_trace_list.lock);
++ return 0;
++}
++EXPORT_SYMBOL(dvfm_query_device_list);
++
++/*
++ * A device driver registers itself with DVFM before any operation.
++ * The number of registered devices is limited to 32.
++ */
++int dvfm_register(char *name, int *id)
++{
++ struct dvfm_trace_info *p = NULL;
++ int len, idx;
++
++ if (name == NULL)
++ return -EINVAL;
++
++	/* device name is restricted to 32 bytes */
++ len = strlen(name);
++ if (len > DVFM_MAX_NAME)
++ len = DVFM_MAX_NAME;
++ write_lock(&dvfm_trace_list.lock);
++ list_for_each_entry(p, &dvfm_trace_list.list, list) {
++ if (!strcmp(name, p->name)) {
++ /*
++ * Find device in device trace table
++ * Skip to allocate new ID
++ */
++ *id = p->index;
++ goto out;
++ }
++ }
++ idx = add_device(name);
++ if (idx < 0)
++ goto out_num;
++ *id = idx;
++out:
++ write_unlock(&dvfm_trace_list.lock);
++ return 0;
++out_num:
++ write_unlock(&dvfm_trace_list.lock);
++ return -EINVAL;
++}
++EXPORT_SYMBOL(dvfm_register);
++
++/*
++ * Release the device and free the device index.
++ */
++int dvfm_unregister(char *name, int *id)
++{
++ struct op_info *q = NULL;
++ struct dvfm_trace_info *p = NULL;
++ int len, num, i;
++
++ if (!dvfm_driver || !dvfm_driver->count || (name == NULL))
++ return -EINVAL;
++
++	/* device name is restricted to 32 bytes */
++ len = strlen(name);
++ if (len > DVFM_MAX_NAME)
++ len = DVFM_MAX_NAME;
++
++ num = dvfm_driver->count(dvfm_driver->priv, dvfm_op_list);
++
++ write_lock(&dvfm_trace_list.lock);
++ if (list_empty(&dvfm_trace_list.list))
++ goto out;
++ list_for_each_entry(p, &dvfm_trace_list.list, list) {
++ if (!strncmp(name, p->name, len)) {
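++			/* Release any OP constraints this device still holds before removing it */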
++ for (i = 0; i < num; ++i) {
++ if (!dvfm_find_op(i, &q)) {
++ write_lock(&dvfm_op_list->lock);
++ if (test_bit(p->index, (void *)&q->device)) {
++					printk(KERN_ERR "%s uses the PM interface incorrectly; please remove its constraint before unregistering!\n", name);
++ dvfm_enable_op(i, p->index);
++ }
++ write_unlock(&dvfm_op_list->lock);
++ }
++ }
++
++ /* clear the device index */
++ clear_bit(*id, (void *)&dvfm_trace_list.device);
++ *id = -1;
++ list_del(&p->list);
++ kfree(p);
++ break;
++ }
++ }
++ write_unlock(&dvfm_trace_list.lock);
++ return 0;
++out:
++ write_unlock(&dvfm_trace_list.lock);
++ return -ENOENT;
++}
++EXPORT_SYMBOL(dvfm_unregister);
++
++#ifndef CONFIG_BPMD
++static int dvfm_add(struct sys_device *sys_dev)
++{
++ int i, n;
++ int ret;
++
++ n = ARRAY_SIZE(dvfm_attr);
++ for (i = 0; i < n; i++) {
++ ret = sysfs_create_file(&(sys_dev->kobj), dvfm_attr[i]);
++ if (ret)
++ return -EIO;
++ }
++ return 0;
++}
++
++static int dvfm_rm(struct sys_device *sys_dev)
++{
++ int i, n;
++ n = ARRAY_SIZE(dvfm_attr);
++ for (i = 0; i < n; i++) {
++ sysfs_remove_file(&(sys_dev->kobj), dvfm_attr[i]);
++ }
++ return 0;
++}
++
++static int dvfm_suspend(struct sys_device *sysdev, pm_message_t pmsg)
++{
++ return 0;
++}
++
++static int dvfm_resume(struct sys_device *sysdev)
++{
++ return 0;
++}
++
++static struct sysdev_driver dvfm_sysdev_driver = {
++ .add = dvfm_add,
++ .remove = dvfm_rm,
++ .suspend = dvfm_suspend,
++ .resume = dvfm_resume,
++};
++#endif
++
++int dvfm_register_driver(struct dvfm_driver *driver_data, struct info_head *op_list)
++{
++ int ret = 0;
++ if (!driver_data || !driver_data->set)
++ return -EINVAL;
++ if (dvfm_driver)
++ return -EBUSY;
++ dvfm_driver = driver_data;
++
++ if (!op_list)
++ return -EINVAL;
++ dvfm_op_list = op_list;
++
++#ifndef CONFIG_BPMD
++	/* enable_op needs to invoke the DVFM operation */
++ dvfm_register("User", &dvfm_dev_idx);
++ ret = sysdev_driver_register(&cpu_sysdev_class, &dvfm_sysdev_driver);
++#endif
++ return ret;
++}
++
++int dvfm_unregister_driver(struct dvfm_driver *driver)
++{
++#ifndef CONFIG_BPMD
++ sysdev_driver_unregister(&cpu_sysdev_class, &dvfm_sysdev_driver);
++ dvfm_unregister("User", &dvfm_dev_idx);
++#endif
++ dvfm_driver = NULL;
++ return 0;
++}
++
++unsigned int NextWakeupTimeAbs;
++unsigned int AppsSyncEnabled = 0;
++
++//this function should be called from the ACIPC driver when a comm relinquish event occurs
++int dvfm_notify_next_comm_wakeup_time(unsigned int NextWakeupTimeRel)
++{
++ unsigned int TimeStamp;
++
++ TimeStamp = dvfm_driver->read_time();
++
++ if (NextWakeupTimeRel == 0)
++ {
++ AppsSyncEnabled = 0;
++ }
++ else
++ {
++ AppsSyncEnabled = 1;
++ }
++	//we receive the next relative comm wakeup time and add it to the current timestamp to get the absolute time of the next comm wakeup.
++	//this value is stored in a global variable for future use; this should be done every time the comm side goes to D2
++ NextWakeupTimeAbs = NextWakeupTimeRel + TimeStamp;
++ return 0;
++}
++
++//this function should be called from mspm_idle when we want to go to D2 to check when the next wakeup will occur.
++int dvfm_is_comm_wakep_near(void)
++{
++ unsigned int TimeStamp;
++ TimeStamp = dvfm_driver->read_time();
++
++ //if the feature is not enabled we should not prevent D2.
++ if (!AppsSyncEnabled)
++ return 0;
++
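++	//if the next comm wakeup is closer than the threshold, return the remaining time to prevent D2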
++ if (NextWakeupTimeAbs - TimeStamp < APPS_COMM_D2_THRESHOLD)
++ {
++ return (NextWakeupTimeAbs - TimeStamp); //preventing D2
++ }
++ else
++ {
++ return 0; //allowing D2
++ }
++}
++
++MODULE_DESCRIPTION("Basic DVFM support for Monahans");
++MODULE_LICENSE("GPL");
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/bpm.h kernel/arch/arm/mach-pxa/include/mach/bpm.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/bpm.h 2009-12-13 12:59:07.871960663 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/bpm.h 2009-12-12 16:09:26.446281263 +0200
+@@ -0,0 +1,57 @@
++/*
++ * Copyright (C) 2003-2004 Intel Corporation.
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++ *
++ * (C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ *
++ * (C) Copyright 2008 Borqs Corporation.
++ * All Rights Reserved
++ */
++
++#ifndef __BPM_H__
++#define __BPM_H__
++
++#ifdef __KERNEL__
++
++/* 10 BPM event max */
++#define MAX_BPM_EVENT_NUM 10
++#define INFO_SIZE 128
++
++struct bpm_event {
++ int type; /* What type of IPM events. */
++ int kind; /* What kind, or sub-type of events. */
++ unsigned char info[INFO_SIZE]; /* events specific data. */
++};
++
++/* IPM events queue */
++struct bpm_event_queue{
++ int head;
++ int tail;
++ int len;
++ struct bpm_event bpmes[MAX_BPM_EVENT_NUM];
++ wait_queue_head_t waitq;
++};
++
++/* IPM event types. */
++#define IPM_EVENT_PROFILER 0x7 /* Profiler events. */
++
++#define IPM_EVENT_BLINK (0xA0)
++
++/* IPM event kinds. */
++#define IPM_EVENT_IDLE_PROFILER 0x1
++#define IPM_EVENT_PERF_PROFILER 0x2
++
++#define IPM_EVENT_BLINK_SPEEDUP (0x1)
++
++/* IPM event infos, not defined yet. */
++#define IPM_EVENT_NULLINFO 0x0
++
++/* IPM functions */
++extern int bpm_event_notify(int type, int kind, void *info, unsigned int info_len);
++#endif
++
++#endif
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/dvfm.h kernel/arch/arm/mach-pxa/include/mach/dvfm.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/dvfm.h 2009-12-13 12:59:13.655291426 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/dvfm.h 2009-12-12 16:09:26.446281263 +0200
+@@ -0,0 +1,226 @@
++/*
++ * Copyright (C) 2003-2004 Intel Corporation.
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++
++ *(C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#ifndef DVFM_H
++#define DVFM_H
++
++
++#ifdef __KERNEL__
++enum {
++ FV_NOTIFIER_QUERY_SET = 1,
++ FV_NOTIFIER_PRE_SET = 2,
++ FV_NOTIFIER_POST_SET = 3,
++};
++
++
++#define MAXTOKENS 80
++#define CONSTRAINT_NAME_LEN 20
++
++#define DVFM_MAX_NAME 32
++#define DVFM_MAX_DEVICE 32
++
++#define DVFM_FREQUENCY_NOTIFIER 0
++#define DVFM_LOWPOWER_NOTIFIER 1
++
++#define DVFM_FREQ_PRECHANGE 0
++#define DVFM_FREQ_POSTCHANGE 1
++
++#define DVFM_LOWPOWER_PRECHANGE 0
++#define DVFM_LOWPOWER_POSTCHANGE 1
++#define APPS_COMM_D2_THRESHOLD 326
++
++/* set the lowest operating point that is equal or higher than specified */
++#define RELATION_LOW 0
++/* set the highest operating point that is equal or lower than specified */
++#define RELATION_HIGH 1
++/* set the specified operating point */
++#define RELATION_STICK 2
++
++/* Both of these states are used in statistical calculation */
++#define CPU_STATE_RUN 1
++#define CPU_STATE_IDLE 2
++
++/*
++ * operating point definition
++ */
++
++struct op_info {
++ void *op;
++ struct list_head list;
++ unsigned int index;
++	unsigned int device; /* bitmask of device IDs blocking this OP */
++};
++
++struct dvfm_freqs {
++ unsigned int old; /* operating point index */
++ unsigned int new; /* operating point index */
++ struct op_info old_info;
++ struct op_info new_info;
++ unsigned int flags;
++};
++
++struct op_freq {
++ unsigned int cpu_freq;
++};
++
++struct dvfm_op {
++ int index;
++ int count;
++ unsigned int cpu_freq;
++ const char* name;
++};
++
++struct info_head {
++ struct list_head list;
++ rwlock_t lock;
++	unsigned int device; /* bitmask of registered device IDs */
++};
++
++struct head_notifier {
++ spinlock_t lock;
++ struct notifier_block *head;
++};
++
++/**
++ * struct dvfm_lock - the lock struct of dvfm
++ * @lock: the spin lock struct.
++ * @flags: the flags for spin lock.
++ * @count: the count of dvfm_disable_op_name() or dvfm_enable_op_name()
++ *
++ * This struct is used for the mutex lock of dvfm_disable_op_name() and
++ * dvfm_enable_op_name(). A caller cannot call dvfm_enable_op_name()
++ * without having called dvfm_disable_op_name() first, so callers of
++ * dvfm_disable_op_name() and dvfm_enable_op_name() must record how many
++ * times each of these two functions has been called.
++ */
++struct dvfm_lock {
++ spinlock_t lock;
++ unsigned long flags;
++ int dev_idx;
++ int count;
++};
++
++/*
++ * Store the dev_id and dev_name.
++ * The number of registered devices can't exceed 32.
++ */
++struct dvfm_trace_info {
++ struct list_head list;
++ int index; /* index is [0,31] */
++ unsigned int dev_id; /* dev_id == 1 << index */
++ char name[DVFM_MAX_NAME];
++};
++
++
++struct dvfm_driver {
++ int (*get_opinfo)(void *driver_data, void *info);
++ int (*count)(void *driver_data, struct info_head *op_table);
++ int (*set)(void *driver_data, struct dvfm_freqs *freq, unsigned int new,
++ unsigned int relation);
++ int (*dump)(void *driver_data, struct op_info *md, char *buf);
++ char * (*name)(void *driver_data, struct op_info *md);
++ int (*request_set)(void *driver_data, int index);
++ int (*enable_dvfm)(void *driver_data, int dev_id);
++ int (*disable_dvfm)(void *driver_data, int dev_id);
++ int (*enable_op)(void *driver_data, int index, int relation);
++ int (*disable_op)(void *driver_data, int index, int relation);
++ int (*volt_show)(void *driver_data, char *buf);
++#ifdef CONFIG_CPU_PXA310
++ int (*freq_show)(void *driver_date, struct op_info *md, char *buf);
++#endif
++ unsigned int (*ticks_to_usec)(unsigned int);
++ unsigned int (*ticks_to_sec)(unsigned int);
++ unsigned int (*read_time)(void);
++ int (*get_freq)(void* driver_data, struct op_info *md, struct op_freq *freq);
++ int (*check_active_op)(void *driver_data, struct op_info *md);
++ void *priv;
++};
++
++extern struct dvfm_driver *dvfm_driver;
++extern struct info_head *dvfm_op_list;
++extern unsigned int op_nums;
++
++extern int dvfm_notifier_frequency(struct dvfm_freqs *freqs, unsigned int state);
++extern int dvfm_notifier_lowpower(struct dvfm_freqs *freqs, unsigned int state);
++extern int dvfm_register_notifier(struct notifier_block *nb, unsigned int list);
++extern int dvfm_unregister_notifier(struct notifier_block *nb, unsigned int list);
++extern int dvfm_register_driver(struct dvfm_driver *driver_data, struct info_head *op_list);
++extern int dvfm_unregister_driver(struct dvfm_driver *driver);
++extern int dvfm_register(char *name, int *);
++extern int dvfm_unregister(char *name, int *);
++extern int dvfm_query_device_num(void);
++extern int dvfm_query_device_list(void *, int);
++
++extern int dvfm_enable_op(int, int);
++extern int dvfm_disable_op(int, int);
++extern int dvfm_enable(int);
++extern int dvfm_enable_op_name(char *, int);
++extern int dvfm_disable_op_name(char *, int);
++extern int dvfm_disable(int);
++extern int dvfm_dump_op(int, char*);
++
++extern int dvfm_set_op(struct dvfm_freqs *, unsigned int, unsigned int);
++extern int dvfm_get_op(struct op_info **);
++extern int dvfm_get_op_freq(int, struct op_freq *);
++extern int dvfm_check_active_op(int);
++extern int dvfm_get_defop(void);
++extern int dvfm_get_opinfo(int, struct op_info **);
++extern int dvfm_request_op(int);
++extern int dvfm_op_count(void);
++extern int dvfm_find_op(int, struct op_info **);
++extern int dvfm_trace(char *);
++extern int dvfm_add_event(int, int, int, int);
++extern int dvfm_add_timeslot(int, int);
++extern int calc_switchtime_start(int, int, unsigned int);
++extern int calc_switchtime_end(int, int, unsigned int);
++
++//handling comm/apps sync
++extern int dvfm_notify_next_comm_wakeup_time(unsigned int NextWakeupTimeRel);
++extern int dvfm_is_comm_wakep_near(void);
++
++extern const char* dvfm_get_op_name(int);
++
++/**
++ * dvfm_disable_op_name: - disable the operating point by its name.
++ * @name: the operating point's name.
++ *
++ * Context: process and interrupt.
++ *
++ * disable the operating points by op's name, op name set includes
++ * "D0CS","156M","208M","416M","624M" and "D2".
++ *
++ * Returns zero on success, else negative errno.
++ */
++#define dvfm_disable_op_name(name, dev_idx) \
++ (_dvfm_disable_op_name(name, dev_idx, __FILE__))
++
++extern int _dvfm_disable_op_name(char *name, int dev_idx, char *sid);
++
++/**
++ * dvfm_enable_op_name: - enable the operating point by its name.
++ * @name: the operating point's name.
++ *
++ * Context: process and interrupt.
++ *
++ * enable the operating points by op's name, op name set includes
++ * "D0CS","156M","208M","416M","624M" and "D2".
++ *
++ * Returns zero on success, else negative errno.
++ */
++#define dvfm_enable_op_name(name, dev_idx) \
++ (_dvfm_enable_op_name(name, dev_idx, __FILE__))
++
++extern int _dvfm_enable_op_name(char *name, int dev_idx, char *sid);
++
++#endif
++
++#endif
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/mspm_prof.h kernel/arch/arm/mach-pxa/include/mach/mspm_prof.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/mspm_prof.h 2009-12-13 12:59:18.941953014 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/mspm_prof.h 2009-12-12 16:09:26.456281390 +0200
+@@ -0,0 +1,66 @@
++/*
++ * PXA Performance profiler and Idle profiler Routines
++ *
++ * Copyright (c) 2003 Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * (C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#ifndef MSPM_PROF_H
++#define MSPM_PROF_H
++
++#include <mach/pmu.h>
++#include <mach/xscale-pmu.h>
++
++#define IPM_IDLE_PROFILER 1
++#define IPM_PMU_PROFILER 2
++
++struct ipm_profiler_result {
++ struct pmu_results pmu;
++ unsigned int busy_ratio; /* CPU busy ratio */
++ unsigned int mips;
++ unsigned int window_size;
++};
++
++struct ipm_profiler_arg {
++ unsigned int size; /* size of ipm_profiler_arg */
++ unsigned int flags;
++	unsigned int window_size; /* in milliseconds */
++ unsigned int pmn0;
++ unsigned int pmn1;
++ unsigned int pmn2;
++ unsigned int pmn3;
++};
++
++#ifdef __KERNEL__
++extern volatile int hlt_counter;
++
++#define OSCR_MASK ~(1UL)
++
++#undef MAX_OP_NUM
++#define MAX_OP_NUM 20
++
++/* The minimum sample window is 20ms, the default window is 100ms */
++#define MIN_SAMPLE_WINDOW 20
++#define DEF_SAMPLE_WINDOW 100
++
++#define DEF_HIGH_THRESHOLD 80
++#define DEF_LOW_THRESHOLD 20
++
++extern int mspm_add_event(int op, int cpu_idle);
++extern int mspm_prof_init(void);
++extern void mspm_prof_exit(void);
++
++extern int bpm_event_notify(int type, int kind, void *info,
++ unsigned int info_len);
++//extern int (*pipm_start_pmu)(struct ipm_profiler_arg *arg);
++//extern int (*pipm_stop_pmu)(void);
++#endif
++
++#endif
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/pmu.h kernel/arch/arm/mach-pxa/include/mach/pmu.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/pmu.h 2009-12-13 12:59:24.521951391 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/pmu.h 2009-12-12 16:09:26.459612243 +0200
+@@ -0,0 +1,555 @@
++/*
++ * "This software program is available to you under a choice of one of two
++ * licenses. You may choose to be licensed under either the GNU General Public
++ * License (GPL) Version 2, June 1991, available at
++ * http://www.fsf.org/copyleft/gpl.html, or the BSD License, the text of
++ * which follows:
++ *
++ * Copyright (c) 1996-2005, Intel Corporation. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * Redistributions of source code must retain the above copyright notice, this
++ * list of conditions and the following disclaimer.
++ *
++ * Redistributions in binary form must reproduce the above copyright notice, this
++ * list of conditions and the following disclaimer in the documentation and/or
++ * other materials provided with the distribution.
++ *
++ * Neither the name of the Intel Corporation ("Intel") nor the names of its
++ * contributors may be used to endorse or promote products derived from this
++ * software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
++ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
++ */
++
++/*
++ * FILENAME: pmu.h
++ *
++ * CORE STEPPING:
++ *
++ * PURPOSE: contains all PMU specific macros, typedefs, and prototypes.
++ * Declares no storage.
++ */
++
++#ifndef __PMU_H__
++#define __PMU_H__
++
++/* PMU Performance Monitor Control Register (PMNC) */
++#define PMU_ID (0x24u << 24)
++#define PMU_COUNTERS_DISALBLE (1u<<4)
++#define PMU_CLOCK_DIVIDER (1u<<3)
++#define PMU_CLOCK_RESET (1u<<2)
++#define PMU_COUNTERS_RESET (1u<<1)
++#define PMU_3_COUNTERS_ENABLE (1u<<0)
++#define PMU_COUNTERS_ENABLE (1u<<0)
++
++/* INTEN & FLAG Registers bit definition*/
++#define PMU_CLOCK_COUNT (1u<<0)
++#define PMU_COUNT_0 (1u<<1)
++#define PMU_COUNT_1 (1u<<2)
++#define PMU_COUNT_2 (1u<<3)
++#define PMU_COUNT_3 (1u<<4)
++
++/*Events combination*/
++/*!evtCount0/2:0x7(instruction count), evtCount1/3:0x0(ICache miss)*/
++#define PMU_EVTCOUNT_1 (0x0007)
++/*!evtCount0/2:0xA(DCache Access), evtCount1/3:0xB(DCache miss)*/
++#define PMU_EVTCOUNT_2 (0x0B0A)
++/*!evtCount0/2:0x1(ICache cannot deliver), evtCount1/3:0x0(ICache miss)*/
++#define PMU_EVTCOUNT_3 (0x0001)
++/*!evtCount0/2:0xB(DBufer stall duration), evtCount1/3:0x9(Dbuffer stall)*/
++#define PMU_EVTCOUNT_4 (0x090B)
++/*!evtCount0/2:0x2(data stall), evtCount1/3:0xC(DCache writeback)*/
++#define PMU_EVTCOUNT_5 (0x0C02)
++/*!evtCount0/2:0x7(instruction count), evtCount1/3:0x3(ITLB miss)*/
++#define PMU_EVTCOUNT_6 (0x0307)
++/*!evtCount0/2:0xA(DCache Access), evtCount/31:0x4(DTLB miss)*/
++#define PMU_EVTCOUNT_7 (0x040A)
++
++/* PXA3xx/PXA900 PML event selector register offset */
++#define PML_ESEL_0_OFF (0x0)
++#define PML_ESEL_1_OFF (0x4)
++#define PML_ESEL_2_OFF (0x8)
++#define PML_ESEL_3_OFF (0xC)
++#define PML_ESEL_4_OFF (0x10)
++#define PML_ESEL_5_OFF (0x14)
++#define PML_ESEL_6_OFF (0x18)
++#define PML_ESEL_7_OFF (0x1C)
++
++enum {
++ PMU_PMNC = 0,
++ PMU_CCNT,
++ PMU_PMN0,
++ PMU_PMN1,
++ PMU_PMN2,
++ PMU_PMN3,
++ PMU_INTEN,
++ PMU_FLAG,
++ PMU_EVTSEL
++};
++
++/*
++ * PMU and PML Event
++ */
++enum {
++ PMU_EVENT_INVALIDATE=0xFFFFFFFFu,
++
++ /*!< L1 Instruction cache miss requires fetch from external memory */
++ PMU_EVENT_L1_INSTRUCTION_MISS=0x0u,
++
++	/*!< L1 Instruction cache cannot deliver an instruction. This indicates
++	 * an instruction cache or TLB miss. This event will occur every cycle
++ * in which the condition is present
++ */
++ PMU_EVENT_L1_INSTRUCTION_NOT_DELIVER,
++
++ /*!< Stall due to a data dependency. This event will occur every cycle
++ * in which the condition is present
++ */
++ PMU_EVENT_STALL_DATA_DEPENDENCY,
++
++ /*!< Instruction TLB miss*/
++ PMU_EVENT_INSTRUCTION_TLB_MISS,
++
++ /*!< Data TLB miss*/
++ PMU_EVENT_DATA_TLB_MISS,
++
++	/*!< Branch instruction retired; the branch may or may not have changed
++	 * program flow. (Counts only B and BL instructions, in both ARM and
++ * Thumb mode)
++ */
++ PMU_EVENT_BRANCH_RETIRED,
++
++ /*!< Branch mispredicted. Counts only B and BL instructions, in both
++ * ARM and Thumb mode
++ */
++ PMU_EVENT_BRANCH_MISPREDICTED,
++
++ /*!< Instruction retired. This event will occur every cycle in which
++ * the condition is present
++ */
++ PMU_EVENT_INSTRUCTION_RETIRED,
++
++ /*!< L1 Data cache buffer full stall. This event will occur every
++ * cycle in which the condition is present.
++ */
++ PMU_EVENT_L1_DATA_STALL,
++
++	/*!< L1 Data cache buffer full stall. This event occurs for each
++ * contiguous sequence of this type of stall
++ */
++ PMU_EVENT_L1_DATA_STALL_C,
++
++ /*!< L1 Data cache access, not including Cache Operations. All data
++	 * accesses are treated as cacheable accesses and are counted here
++ * even if the cache is not enabled
++ */
++ PMU_EVENT_L1_DATA_ACCESS,
++
++ /*!< L1 Data cache miss, not including Cache Operations. All data
++	 * accesses are treated as cacheable accesses and are counted as
++	 * misses if the data cache is not enabled
++ */
++ PMU_EVENT_L1_DATA_MISS,
++
++	/*!< L1 data cache write-back. This event occurs once for each line
++ * that is written back from the cache
++ */
++ PMU_EVENT_L1_DATA_WRITE_BACK,
++
++	/*!< Software changes to the PC (b bx bl blx and eor sub rsb add adc sbc
++	 * rsc orr mov bic mvn ldm pop) will be counted. The count does not
++	 * increment when an exception occurs and the PC changes to the
++	 * exception address (e.g. IRQ, FIQ, SWI, ...)
++ */
++ PMU_EVENT_SOFTWARE_CHANGED_PC,
++
++	/*!< Branch instruction retired; the branch may or may not have changed
++ * program flow.
++ * (Count ALL branch instructions, indirect as well as direct)
++ */
++ PMU_EVENT_BRANCH_RETIRED_ALL,
++
++ /*!< Instruction issue cycle of retired instruction. This event is a
++	 * count of the number of core cycles each instruction requires to issue
++ */
++ PMU_EVENT_INSTRUCTION_CYCLE_RETIRED,
++
++	/*!< All changes to the PC (includes software changes and exceptions) */
++ PMU_EVENT_ALL_CHANGED_PC=0x18,
++
++ /*!< Pipe line flush due to branch mispredict or exception*/
++ PMU_EVENT_PIPE_FLUSH_BRANCH,
++
++	/*!< The core could not issue an instruction due to a backend stall.
++ * This event will occur every cycle in which the condition is present
++ */
++ PMU_EVENT_BACKEND_STALL,
++
++ /*!< Multiplier in use. This event will occur every cycle in which
++ * the multiplier is active
++ */
++ PMU_EVENT_MULTIPLIER,
++
++	/*!< Multiplier stalled the instruction pipeline due to a resource stall.
++ * This event will occur every cycle in which the condition is present
++ */
++ PMU_EVENT_MULTIPLIER_STALL_PIPE,
++
++ /*!< Coprocessor stalled the instruction pipeline. This event will
++ * occur every cycle in which the condition is present
++ */
++ PMU_EVENT_COPROCESSOR_STALL_PIPE,
++
++ /*!< Data cache stalled the instruction pipeline. This event will
++ * occur every cycle in which the condition is present
++ */
++ PMU_EVENT_DATA_CACHE_STALL_PIPE,
++
++ /*!< Unified L2 Cache request, not including cache operations. This
++	 * event includes table walks, data and instruction requests
++ */
++ PMU_EVENT_L2_REQUEST=0x20,
++
++ /*!< Unified L2 cache miss, not including cache operations*/
++ PMU_EVENT_L2_MISS=0x23,
++
++	/*!< Address bus transaction*/
++ PMU_EVENT_ADDRESS_BUS=0x40,
++
++ /*!< Self initiated(Core Generated) address bus transaction*/
++ PMU_EVENT_SELF_INITIATED_ADDRESS,
++
++	/*!< Bus clock. This event occurs once for each bus cycle*/
++ PMU_EVENT_BUS_CLOCK=0x43,
++
++	/*!< Self initiated (Core Generated) data bus transaction. This event occurs once for
++ * each data bus cycle
++ */
++ PMU_EVENT_SELF_INITIATED_DATA=0x47,
++
++	/*!< Data bus transaction. This event occurs once for
++ * each data bus cycle
++ */
++ PMU_EVENT_BUS_TRANSACTION,
++
++ PMU_EVENT_ASSP_0=0x80,
++ PMU_EVENT_ASSP_1,
++ PMU_EVENT_ASSP_2,
++ PMU_EVENT_ASSP_3,
++ PMU_EVENT_ASSP_4,
++ PMU_EVENT_ASSP_5,
++ PMU_EVENT_ASSP_6,
++ PMU_EVENT_ASSP_7,
++
++ /*!< Power Saving event. This event deactivates the corresponding
++ * PMU event counter
++ */
++ PMU_EVENT_POWER_SAVING=0xFF,
++
++ PXA3xx_EVENT_MASK=0x80000000,
++
++ /*!< Core is performing a new instruction fetch.
++ * e.g. an L2 cache miss.
++ */
++ PXA3xx_EVENT_CORE_INSTRUCTION_FETCH=PXA3xx_EVENT_MASK,
++
++ /*!< Core is performing a new data fetch*/
++ PXA3xx_EVENT_CORE_DATA_FETCH,
++
++ /*!< Core read request count*/
++ PXA3xx_EVENT_CORE_READ,
++
++ /*!< LCD read request count*/
++ PXA3xx_EVENT_LCD_READ,
++
++ /*!< DMA read request count*/
++ PXA3xx_EVENT_DMA_READ,
++
++ /*!< Camera interface read request count*/
++ PXA3xx_EVENT_CAMERA_READ,
++
++ /*!< USB 2.0 read request count*/
++ PXA3xx_EVENT_USB20_READ,
++
++ /*!< 2D graphics read request count*/
++ PXA3xx_EVENT_2D_READ,
++
++ /*!< USB1.1 host read request count*/
++ PXA3xx_EVENT_USB11_READ,
++
++ /*!< PX1 bus utilization. the number of cycles during which
++ * the PX1 bus is occupied
++ */
++ PXA3xx_EVENT_PX1_UNITIZATION,
++
++ /*!< PX2 (sidecar) bus utilization. the number of cycles
++ * during which the PX2 bus is occupied
++ */
++ PXA3xx_EVENT_PX2_UNITIZATION,
++
++ /*!< Dynamic memory queue for Mandris occupied. the number of
++ * cycles when the DMC queue is not empty
++ */
++ PXA3xx_EVENT_DMC_NOT_EMPTY=PXA3xx_EVENT_MASK|14,
++
++ /*!< Dynamic memory queue for Mandris occupied by more than 1 request.
++ * the number of cycles when the DMC queue has 2 or more requests
++ */
++ PXA3xx_EVENT_DMC_2,
++
++ /*!< Dynamic memory queue for Mandris occupied by more than 2 requests.
++ * the number of cycles when the DMC queue has 3 or more requests
++ */
++ PXA3xx_EVENT_DMC_3,
++
++ /*!< Dynamic memory queue for Mandris occupied by more than 3 requests.
++ * the number of cycles when the DMC queue is full
++ */
++ PXA3xx_EVENT_DMC_FULL,
++
++ /*!< Static memory queue for Mandris occupied. the number of cycles
++ * when the SMC queue is not empty
++ */
++ PXA3xx_EVENT_SMC_NOT_EMPTY,
++
++ /*!< Static memory queue for Mandris occupied by more than 1 request.
++ * the number of cycles when the SMC queue has 2 or more requests
++ */
++ PXA3xx_EVENT_SMC_2,
++
++ /*!< Static memory queue for Mandris occupied by more than 2 requests.
++ * the number of cycles when the SMC queue has 3 or more requests
++ */
++ PXA3xx_EVENT_SMC_3,
++
++ /*!< Static memory queue for Mandris occupied by more than 3 requests.
++ * the number of cycles when the SMC queue is full
++ */
++ PXA3xx_EVENT_SMC_FULL,
++
++ /*!< Internal SRAM queue for Mandris occupied. the number of cycles
++ * when the ISRAM queue is not empty
++ */
++ PXA3xx_EVENT_ISRAM_NOT_EMPTY=PXA3xx_EVENT_MASK|26,
++
++ /*!< Internal SRAM queue for Mandris occupied by more than 1 request.
++ * the number of cycles when the ISRAM queue has 2 or more requests
++ */
++ PXA3xx_EVENT_ISRAM_2,
++
++ /*!< Internal SRAM queue for Mandris occupied by more than 2 requests.
++ * the number of cycles when the ISRAM queue has 3 or more requests
++ */
++ PXA3xx_EVENT_ISRAM_3,
++
++ /*!< Internal SRAM queue for Mandris occupied by more than 3 requests.
++ * the number of cycles when the ISRAM queue is full
++ */
++ PXA3xx_EVENT_ISRAM_FULL,
++
++ /*!< the number of cycles when external memory controller bus
++ * is occupied
++ */
++ PXA3xx_EVENT_EXMEM,
++
++ /*!< the number of cycles when the external data flash bus is occupied */
++ PXA3xx_EVENT_DFC,
++
++ /*!< Core write request count*/
++ PXA3xx_EVENT_CORE_WRITE=PXA3xx_EVENT_MASK|36,
++
++ /*!< DMA write request count*/
++ PXA3xx_EVENT_DMA_WRITE,
++
++ /*!< Camera interface write request count*/
++ PXA3xx_EVENT_CAMERA_WRITE,
++
++ /*!< USB 2.0 write request count*/
++ PXA3xx_EVENT_USB20_WRITE,
++
++ /*!< 2D graphics write request count*/
++ PXA3xx_EVENT_2D_WRITE,
++
++ /*!< USB1.1 host write request count*/
++ PXA3xx_EVENT_USB11_WRITE,
++
++ /*!< PX1 bus request. length of time that at least one bus request
++ * is asserted on PX bus 1
++ */
++ PXA3xx_EVENT_PX1_REQUEST,
++
++ /*!< PX2 bus request. length of time that at least one bus request
++ * is asserted on PX bus 2
++ */
++ PXA3xx_EVENT_PX2_REQUEST,
++
++ /*!< PX1 bus retries. number of retries on PX bus 1*/
++ PXA3xx_EVENT_PX1_RETRIES,
++
++ /*!< PX2 bus retries. number of retries on PX bus 2*/
++ PXA3xx_EVENT_PX2_RETRIES,
++
++ /*!< Temperature level 1. time the part has spent in temperature range 1*/
++ PXA3xx_EVENT_TEMPERATURE_1,
++
++ /*!< Temperature level 2. time the part has spent in temperature range 2*/
++ PXA3xx_EVENT_TEMPERATURE_2,
++
++ /*!< Temperature level 3. time the part has spent in temperature range 3*/
++ PXA3xx_EVENT_TEMPERATURE_3,
++
++ /*!< Temperature level 4. time the part has spent in temperature range 4*/
++ PXA3xx_EVENT_TEMPERATURE_4,
++
++ /*!< Core read/write latency measurement. Amount of time when the core
++ * has more than 1 read/write request outstanding
++ */
++ PXA3xx_EVENT_CORE_LATENCY_1,
++
++ /*!< Core read/write latency measurement. Amount of time when the core
++ * has more than 2 read/write requests outstanding
++ */
++ PXA3xx_EVENT_CORE_LATENCY_2,
++
++ /*!< Core read/write latency measurement. Amount of time when the core
++ * has more than 3 read/write requests outstanding
++ */
++ PXA3xx_EVENT_CORE_LATENCY_3,
++
++ /*!< Core read/write latency measurement. Amount of time when the core
++ * has more than 4 read/write requests outstanding
++ */
++ PXA3xx_EVENT_CORE_LATENCY_4,
++
++ /*!< PX1 to IM read/write latency measurement. Amount of time when
++ * PX1 to IM has more than 1 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_IM_1,
++
++ /*!< PX1 to IM read/write latency measurement. Amount of time when
++ * PX1 to IM has more than 2 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_IM_2,
++
++ /*!< PX1 to IM read/write latency measurement. Amount of time when
++ * PX1 to IM has more than 3 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_IM_3,
++
++ /*!< PX1 to IM read/write latency measurement. Amount of time when
++ * PX1 to IM has more than 4 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_IM_4,
++
++ /*!< PX1 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX1 to DMEM/SMEM has more than 1 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_MEM_1,
++
++ /*!< PX1 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX1 to DMEM/SMEM has more than 2 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_MEM_2,
++
++ /*!< PX1 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX1 to DMEM/SMEM has more than 3 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_MEM_3,
++
++ /*!< PX1 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX1 to DMEM/SMEM has more than 4 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX1_MEM_4,
++
++ /*!< PX2 to IM read/write latency measurement. Amount of time when
++ * PX2 to IM has more than 1 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_IM_1,
++
++ /*!< PX2 to IM read/write latency measurement. Amount of time when
++ * PX2 to IM has more than 2 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_IM_2,
++
++ /*!< PX2 to IM read/write latency measurement. Amount of time when
++ * PX2 to IM has more than 3 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_IM_3,
++
++ /*!< PX2 to IM read/write latency measurement. Amount of time when
++ * PX2 to IM has more than 4 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_IM_4,
++
++ /*!< PX2 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX2 to DMEM/SMEM has more than 1 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_MEM_1,
++
++ /*!< PX2 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX2 to DMEM/SMEM has more than 2 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_MEM_2,
++
++ /*!< PX2 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX2 to DMEM/SMEM has more than 3 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_MEM_3,
++
++ /*!< PX2 to DMEM/SMEM read/write latency measurement. Amount of time
++ * when PX2 to DMEM/SMEM has more than 4 read/write requests outstanding.
++ */
++ PXA3xx_EVENT_PX2_MEM_4
++};
++
++#ifdef __KERNEL__
++struct pxa3xx_pmu_info {
++ /* performance monitor unit register base */
++ unsigned char __iomem *pmu_base;
++};
++
++#ifdef __cplusplus
++extern "C"
++{
++#endif
++
++/*
++ * This routine reads the designated PMU register via CoProcessor 14
++ *
++ * @param aReg PMU register number to read (an unsigned int)
++ * @return 32-bit value read from register
++ */
++extern unsigned int pmu_read_reg(unsigned int aReg);
++
++/*
++ * This routine writes the designated PMU register via CoProcessor 14
++ *
++ * @param aReg PMU register number to write (an unsigned int)
++ * aValue Value to write to PMU register
++ * @return
++ */
++extern void pmu_write_reg(unsigned int aReg, unsigned int aValue);
++
++extern int pmu_select_event(int counter, int type);
++
++extern void pxa3xx_set_pmu_info(void *info);
++
++#ifdef __cplusplus
++}
++#endif
++#endif
++
++#endif //__PMU_H__
++
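As a sketch of how the pmu.h interface above might be driven from kernel code: the register indices follow the pmu_read_reg()/pmu_write_reg() map documented in pmu_ll.S later in this patch (0 = PMNC, 2 = PMN0), and treating PMNC bit 0 as the counter enable bit is an assumption, so this is an illustration rather than part of the patch.

    /* Sketch: count L1 data cache misses on PMU counter 0.  Register indices
     * follow the pmu_read_reg()/pmu_write_reg() map in pmu_ll.S (0 = PMNC,
     * 2 = PMN0); treating PMNC bit 0 as the enable bit is an assumption. */
    #include <linux/types.h>
    #include <mach/pmu.h>

    static u32 sample_l1_data_misses(void)
    {
            pmu_select_event(0, PMU_EVENT_L1_DATA_MISS);  /* route event to counter 0 */
            pmu_write_reg(2, 0);                          /* clear PMN0 */
            pmu_write_reg(0, 0x1);                        /* PMNC: enable counting (assumed bit) */

            /* ... run the code of interest ... */

            pmu_write_reg(0, 0x0);                        /* stop counting */
            return pmu_read_reg(2);                       /* read back PMN0 */
    }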
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/prm.h kernel/arch/arm/mach-pxa/include/mach/prm.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/prm.h 2009-12-13 12:59:30.199033933 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/prm.h 2009-12-12 16:09:26.459612243 +0200
+@@ -0,0 +1,138 @@
++/*
++ * include/asm-arm/arch-pxa/prm.h
++ *
++ * Copyright (C) 2006, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __PRM_H
++#define __PRM_H
++
++#include <linux/interrupt.h>
++#include <mach/irqs.h>
++#include <mach/pmu.h>
++#include <mach/pxa3xx_dvfm.h>
++
++#define MAX_GROUPS 2
++#define MAX_CLIENTS 16
++
++typedef enum {
++ /* tag the lowest priority*/
++ PRI_LOWEST = 0,
++ /*define the possible priorities here*/
++ PRI_IPMC = PRI_LOWEST,
++ PRI_PROFILER,
++ PRI_VTUNE,
++ /*tag the highest priority*/
++ MAX_PRIORITIES,
++ PRI_HIGHEST = MAX_PRIORITIES - 1,
++} prm_priority;
++
++struct prm_group;
++struct prm_resource;
++struct prm_resource_state;
++
++typedef enum {
++ PRM_RES_APPROPRIATED,
++ PRM_RES_READY,
++} prm_event;
++
++typedef enum {
++ PRM_CCNT = 0,
++ PRM_PMN0,
++ PRM_PMN1,
++ PRM_PMN2,
++ PRM_PMN3,
++ PRM_VCC0,
++ PRM_VCC1,
++ PRM_IDLE_PROFILER,
++ PRM_COP,
++ RESOURCE_NUM,
++} prm_resource_id;
++
++typedef void (*clientcallback)(prm_event, unsigned int, void *);
++
++/* The group includes a set of resources. If one of the resources in the set
++ * is appropriated, the other resources will not be available for access, but
++ * the resources are still allocated by the client. So the group is defined as
++ * a set of resources that either can all be accessed or can all not be
++ * accessed.
++ */
++struct prm_group {
++ unsigned int id;
++ /* appropriated resources count */
++ unsigned int appropriated_cnt;
++ /* total resources count in the group */
++ unsigned int member_cnt;
++ /* list for all the resources in the group */
++ struct list_head resources;
++ struct proc_dir_entry *dir;
++};
++
++struct prm_client {
++ /* client id */
++ unsigned int id;
++ /* process id for the client */
++ unsigned int pid;
++ /* priority for the client.(LOW or HIGH) */
++ prm_priority priority;
++ /* name of the client */
++ char *name;
++ /* How many groups in the client */
++ unsigned int group_cnt;
++ /* support MAXGROUP groups, some may be NULL */
++ struct prm_group *groups[MAX_GROUPS];
++ void *client_data;
++ /* notifier for resource appropriate and ready */
++ clientcallback notify;
++ irq_handler_t handler;
++ void *dev_id;
++ struct proc_dir_entry *dir;
++};
++
++struct prm_resource_state {
++ /* which client allocated the resource. At each priority,
++ * there can be only one client allocating the resource
++ */
++ struct prm_client *allocate;
++ /* which group it belongs to */
++ struct prm_group *group;
++ int active;
++ struct prm_resource *resource;
++ /* used by prm_group->resources for linking the resources into the group */
++ struct list_head entry;
++ struct proc_dir_entry *dir;
++};
++
++struct prm_resource {
++ struct prm_client *access; /* Only one client can access it */
++ prm_resource_id id;
++ struct prm_resource_state priority[MAX_PRIORITIES];
++ struct proc_dir_entry *dir;
++};
++
++int prm_open_session(prm_priority , char * , clientcallback , void * );
++int prm_close_session(unsigned int );
++int prm_allocate_resource(unsigned int , prm_resource_id , unsigned int );
++int prm_free_resources(unsigned int , unsigned int );
++int prm_commit_resources(unsigned int , unsigned int );
++int pmu_read_register(unsigned int , int , unsigned int * );
++int pmu_write_register(unsigned int , int , unsigned int );
++int pmu_set_event(unsigned int , unsigned int , int * , int );
++int pmu_enable_event_counting(unsigned int );
++int pmu_disable_event_counting(unsigned int );
++int pmu_enable_event_interrupt(unsigned int , int );
++int pmu_disable_event_interrupt(unsigned int , int );
++int pmu_register_isr(unsigned int , irq_handler_t, void * );
++int pmu_unregister_isr(unsigned int );
++int cop_get_num_of_cops(void);
++int cop_get_cop(unsigned int , unsigned int , struct pxa3xx_fv_info *);
++int cop_set_cop(unsigned int , unsigned int , int mode);
++int cop_get_def_cop(unsigned int , unsigned int *, struct pxa3xx_fv_info *);
++int cop_set_def_cop(unsigned int );
++int cop_get_cur_cop(unsigned int , unsigned int *, struct pxa3xx_fv_info *);
++
++#endif
++
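The prototypes at the end of prm.h carry no parameter names, so the following client sketch rests on an assumed convention: prm_open_session() returns a client id, and the later calls take (client id, resource id, group id). Every name and argument value here is illustrative.

    /* Hypothetical PRM client flow; the (client, resource, group) argument
     * ordering is an assumption inferred from the structures above. */
    #include <mach/prm.h>

    static void example_notify(prm_event event, unsigned int group, void *data)
    {
            /* PRM_RES_APPROPRIATED: a higher-priority client took the resources;
             * PRM_RES_READY: they are available to this client again. */
    }

    static int example_claim_counters(void)
    {
            int client;

            client = prm_open_session(PRI_PROFILER, "example", example_notify, NULL);
            if (client < 0)
                    return client;

            prm_allocate_resource(client, PRM_CCNT, 0);   /* group 0, assumed */
            prm_allocate_resource(client, PRM_PMN0, 0);
            prm_commit_resources(client, 0);              /* assumed (client, group) */

            /* ... pmu_read_register()/pmu_write_register() against the counters ... */

            prm_free_resources(client, 0);
            prm_close_session(client);
            return 0;
    }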
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_dvfm.h kernel/arch/arm/mach-pxa/include/mach/pxa3xx_dvfm.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_dvfm.h 2009-12-13 12:59:37.209033179 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/pxa3xx_dvfm.h 2009-12-12 16:09:26.462949527 +0200
+@@ -0,0 +1,94 @@
++/*
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++
++ * (C) Copyright 2007 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#ifndef PXA3XX_DVFM_H
++#define PXA3XX_DVFM_H
++
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_pm.h>
++
++#define DMEMC_FREQ_HIGH 0
++#define DMEMC_FREQ_LOW 1
++#define DMEMC_D0CS_ENTER 2
++#define DMEMC_D0CS_EXIT 3
++
++#define OP_NAME_LEN 16
++
++enum {
++ POWER_MODE_D0 = 0,
++ POWER_MODE_D0CS,
++ POWER_MODE_D1,
++ POWER_MODE_D2,
++ POWER_MODE_CG,
++};
++
++enum {
++ OP_FLAG_FACTORY = 0,
++ OP_FLAG_USER_DEFINED,
++ OP_FLAG_BOOT,
++ OP_FLAG_ALL,
++};
++
++enum {
++ IDLE_D0 = 0,
++ IDLE_D0CS = 1,
++ IDLE_D1 = 2,
++ IDLE_D2 = 4,
++ IDLE_CG = 8,
++};
++
++struct dvfm_md_opt {
++ int vcc_core;
++ int vcc_sram;
++ int xl;
++ int xn;
++ int core;
++ int smcfs;
++ int sflfs;
++ int hss;
++ int dmcfs;
++ int df_clk;
++ int empi_clk;
++ int power_mode;
++ int flag;
++ int lpj;
++ char name[OP_NAME_LEN];
++};
++
++/* This structure is similar to dvfm_md_opt.
++ * Reserve this structure in order to remain compatible
++ */
++struct pxa3xx_fv_info {
++ unsigned long xl;
++ unsigned long xn;
++ unsigned int vcc_core;
++ unsigned int vcc_sram;
++ unsigned long smcfs;
++ unsigned long sflfs;
++ unsigned long hss;
++ unsigned long dmcfs;
++ unsigned long df_clk;
++ unsigned long empi_clk;
++ unsigned long d0cs;
++ /* WARNING: above fields must be consistent with PM_FV_INFO!!!*/
++
++ unsigned long lpj; /* New value for loops_per_jiffy */
++};
++
++struct pxa3xx_freq_mach_info {
++ int flags;
++};
++
++#define PXA3xx_USE_POWER_I2C (1UL << 0)
++extern void set_pxa3xx_freq_info(struct pxa3xx_freq_mach_info *info);
++extern void set_pxa3xx_freq_parent(struct device *parent_dev);
++
++extern int md2fvinfo(struct pxa3xx_fv_info *fv_info, struct dvfm_md_opt *orig);
++
++#endif
++
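A short sketch of the dvfm_md_opt to pxa3xx_fv_info conversion declared above via md2fvinfo(); the numeric values are placeholders rather than a validated PXA3xx operating point, and the millivolt unit for vcc_core is an assumption.

    /* Placeholder operating point; field units and values are assumptions. */
    #include <mach/pxa3xx_dvfm.h>

    static int example_describe_op(void)
    {
            struct dvfm_md_opt op = {
                    .vcc_core   = 1250,            /* assumed mV */
                    .xl         = 16,
                    .xn         = 2,
                    .power_mode = POWER_MODE_D0,
                    .flag       = OP_FLAG_FACTORY,
                    .name       = "example-op",
            };
            struct pxa3xx_fv_info fv;

            /* fill the frequency/voltage view consumed by the PM code */
            return md2fvinfo(&fv, &op);
    }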
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_pm.h kernel/arch/arm/mach-pxa/include/mach/pxa3xx_pm.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_pm.h 2009-12-13 12:59:45.791952709 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/pxa3xx_pm.h 2009-12-12 16:09:26.462949527 +0200
+@@ -0,0 +1,530 @@
++/*
++ * Monahans Power Management Routines
++ *
++ * Copyright (C) 2004, Intel Corporation(chao.xie@intel.com).
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++ *(C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#ifndef __PXA3xx_PM_H__
++#define __PXA3xx_PM_H__
++
++#include <asm/types.h>
++
++/* clock manager registers */
++#define ACCR_OFF 0x00
++#define ACSR_OFF 0x04
++#define AICSR_OFF 0x08
++#define D0CKEN_A_OFF 0x0c
++#define D0CKEN_B_OFF 0x10
++#define AC97_DIV_OFF 0x14
++#define OSCC_OFF 0x10000
++
++/* service power management unit */
++#define PSR_OFF 0x004
++#define PSPR_OFF 0x008
++#define PCFR_OFF 0x00C
++#define PWER_OFF 0x010
++#define PWSR_OFF 0x014
++#define PECR_OFF 0x018
++#define CSER_OFF 0x01C
++#define DCDCSR_OFF 0x080
++#define AVCR_OFF 0x094
++#define SVCR_OFF 0x098
++#define CVCR_OFF 0x09C
++#define PSBR_OFF 0x0A0
++#define PVCR_OFF 0x100
++#if defined(CONFIG_CPU_PXA935)
++#define SDCR_OFF 0x08C
++#endif
++
++/* slave power management unit */
++#define ASCR_OFF 0x00
++#define ARSR_OFF 0x04
++#define AD3ER_OFF 0x08
++#define AD3SR_OFF 0x0c
++#define AD2D0ER_OFF 0x10
++#define AD2D0SR_OFF 0x14
++#define AD2D1ER_OFF 0x18
++#define AD2D1SR_OFF 0x1c
++#define AD1D0ER_OFF 0x20
++#define AD1D0SR_OFF 0x24
++#define ASDCNT_OFF 0x28
++#define AGENP_OFF 0x2c
++#define AD3R_OFF 0x30
++#define AD2R_OFF 0x34
++#define AD1R_OFF 0x38
++
++/* dynamic memory controller registers */
++#define MDCNFG_OFF 0x0000
++#define MDREFR_OFF 0x0004
++#define FLYCNFG_OFF 0x0020
++#define MDMRS_OFF 0x0040
++#define DDR_SCAL_OFF 0x0050
++#define DDR_HCAL_OFF 0x0060
++#define DDR_WCAL_OFF 0x0068
++#define DMCIER_OFF 0x0070
++#define DMCISR_OFF 0x0078
++#define DMCISR2_OFF 0x007C
++#define DDR_DLS_OFF 0x0080
++#define EMPI_OFF 0x0090
++#define RCOMP_OFF 0x0100
++#define PAD_MA_OFF 0x0110
++#define PAD_MDMSB_OFF 0x0114
++#define PAD_MDLSB_OFF 0x0118
++#define PAD_SDRAM_OFF 0x011C
++#define PAD_SDCLK_OFF 0x0120
++#define PAD_SDCS_OFF 0x0124
++#define PAD_SMEM_OFF 0x0128
++#define PAD_SCLK_OFF 0x012C
++
++/* static memory controller registers */
++#define MSC0_OFF 0x0008
++#define MSC1_OFF 0x000C
++#define MECR_OFF 0x0014
++#define SXCNFG_OFF 0x001C
++#define MCMEM0_OFF 0x0028
++#define MCATT0_OFF 0x0030
++#define MCIO0_OFF 0x0038
++#define MEMCLKCFG_OFF 0x0068
++#define CSADRCFG0_OFF 0x0080
++#define CSADRCFG1_OFF 0x0084
++#define CSADRCFG2_OFF 0x0088
++#define CSADRCFG3_OFF 0x008C
++#define CSADRCFG_P_OFF 0x0090
++#define CSMSADRCFG_OFF 0x00A0
++
++/* OS Timer address space */
++#define OST_START 0x40a00000
++#define OST_END 0x40a000df
++
++/* System Bus Arbiter address space */
++#define ARB_START 0x4600fe00
++#define ARB_END 0x4600fe07
++
++/* Registers offset within ARB space */
++#define ARBCTL1_OFF 0x0000
++#define ARBCTL2_OFF 0x0004
++
++/* Dynamic memory controller address space */
++#define DMC_START 0x48100000
++#define DMC_END 0x48100fff
++
++/* static memory controller address space */
++#define SMC_START 0x4a000000
++#define SMC_END 0x4a0000ff
++
++/* Power Management Unit address space */
++#define PM_START 0x40f50000
++#define PM_END 0x40f5018f
++
++/* Bits definition for Clock Control Register */
++#define ACCR_PCCE (1 << 11)
++
++#define ACSR_XPLCK (1 << 29)
++#define ACSR_SPLCK (1 << 28)
++
++#define AICSR_PCIE (1 << 4)
++#define AICSR_TCIE (1 << 2)
++#define AICSR_FCIE (1 << 0)
++
++/* Bits definition for RTC Register */
++#define RTSR_PICE (1 << 15)
++#define RTSR_PIALE (1 << 14)
++
++/* Bits definition for Power Control Register */
++#define ASCR_RDH (1 << 31)
++#define ASCR_D1S (1 << 2)
++#define ASCR_D2S (1 << 1)
++#define ASCR_D3S (1 << 0)
++#define ASCR_MASK (ASCR_D1S | ASCR_D2S | ASCR_D3S)
++#define PSR_MASK 0x07
++#define PCFR_L1DIS (1 << 13)
++#define PCFR_L0EN (1 << 12)
++#define PECR_E1IS (1 << 31)
++#define PECR_E1IE (1 << 30)
++#define PECR_E0IS (1 << 29)
++#define PECR_E0IE (1 << 28)
++#define PECR_DIR1 (1 << 5)
++#define PECR_DIR0 (1 << 4)
++
++/* Bits definition for Oscillator Configuration Register */
++#define OSCC_GPRM (1 << 18) /* GB PLL Request Mask */
++#define OSCC_GPLS (1 << 17) /* GB PLL Lock Status */
++
++/* Bits definition for Application Subsystem General Purpose Register */
++#define AGENP_GBPLL_CTRL (1 << 29)
++#define AGENP_GBPLL_DATA (1 << 28) /* Turn on/off GB PLL */
++#define AGENP_SAVE_WK (1 << 2) /* Save wakeup */
++
++/* BPB address space */
++#define BPB_START 0x42300000
++#define BPB_END 0x4230004B
++
++/* GPIO Wakeup Status Register */
++#define GWSR(x) ((x << 2) + 0x38)
++#define GWSR1 0x3C
++#define GWSR2 0x40
++#define GWSR3 0x44
++#define GWSR4 0x48
++
++/* bits definitions */
++#define ASCR_MTS_OFFSET 12
++#define ASCR_MTS_S_OFFSET 8
++
++#define ACSR_SPLCK_OFFSET 28
++#define ACSR_XPLCK_OFFSET 29
++
++#define ACCR_XL_OFFSET 0
++#define ACCR_XN_OFFSET 8
++#define ACCR_DMCFS_OFFSET 12
++#define ACCR_HSS_OFFSET 14
++#define ACCR_XSPCLK_OFFSET 16
++#define ACCR_SFLFS_OFFSET 18
++#define ACCR_SMCFS_OFFSET 23
++#define ACCR_D0CS_OFFSET 26
++#define ACCR_VAUFS_OFFSET 28
++#define ACCR_SPDIS_OFFSET 30
++#define ACCR_XPDIS_OFFSET 31
++
++#define ACSR_VAUFS_OFFSET 21
++
++#define ACSR_VAUFS_MASK (0x03 << ACSR_VAUFS_OFFSET)
++
++#define MEMCLKCFG_EMPI_OFFSET 0
++#define MEMCLKCFG_DF_OFFSET 16
++
++#define HCAL_HCEN_OFFSET 31
++
++#define MDCNFG_HWNOPHD_OFFSET 28
++#define MDCNFG_HWFREQ_OFFSET 29
++#define MDCNFG_DMCEN_OFFSET 30
++#define MDCNFG_DMAP_OFFSET 31
++
++/* mode save flags */
++#define PM_MODE_SAVE_FLAG_SYS 0x1
++#define PM_MODE_SAVE_FLAG_IRQ 0x2
++#define PM_MODE_SAVE_FLAG_FIQ 0x4
++#define PM_MODE_SAVE_FLAG_ABT 0x8
++#define PM_MODE_SAVE_FLAG_UND 0x10
++#define PM_MODE_SAVE_FLAG_SVC 0x20
++
++/* value for PWRMODE register */
++#define PXA3xx_PM_S2D3C4 0x06
++#define PXA3xx_PM_S0D2C2 0x03
++#define PXA3xx_PM_S3D4C4 0x07
++#define PXA3xx_PM_S0D1C2 0x02
++#define PXA3xx_PM_S0D0C1 0x01
++
++/* CPSR Processor constants */
++#define CPSR_Mode_MASK (0x0000001F)
++#define CPSR_Mode_USR (0x10)
++#define CPSR_Mode_FIQ (0x11)
++#define CPSR_Mode_IRQ (0x12)
++#define CPSR_Mode_SVC (0x13)
++#define CPSR_Mode_ABT (0x17)
++#define CPSR_Mode_UND (0x1B)
++#define CPSR_Mode_SYS (0x1F)
++#define CPSR_I_Bit (0x80)
++#define CPSR_F_Bit (0x40)
++
++
++/****************************************************************************/
++#define PXA3xx_PM_WE_EXTERNAL0 (0x1UL << 0)
++#define PXA3xx_PM_WE_EXTERNAL1 (0x1UL << 1)
++#define PXA3xx_PM_WE_GENERIC(x) (0x1UL << (x + 2))
++#define PXA3xx_PM_WE_DKEY (0x1UL << 2)
++#define PXA3xx_PM_WE_DKEY1 (0x1UL << 3)
++#define PXA3xx_PM_WE_BTUART (0x1UL << 4)
++#define PXA3xx_PM_WE_PMIC (0x1UL << 5)
++#define PXA3xx_PM_WE_NDINT (0x1UL << 6)
++#define PXA3xx_PM_WE_MMC1 (0x1UL << 7)
++#define PXA3xx_PM_WE_MMC2 (0x1UL << 8)
++#define PXA3xx_PM_WE_SSP (0x1UL << 9)
++#define PXA3xx_PM_WE_SSP4 (0x1UL << 10)
++#define PXA3xx_PM_WE_UART1 (0x1UL << 11)
++#define PXA3xx_PM_WE_CI2C (0x1UL << 12)
++#define PXA3xx_PM_WE_SSP2 (0x1UL << 13)
++#define PXA3xx_PM_WE_WDT (0x1UL << 14)
++#define PXA3xx_PM_WE_GPIO (0x1UL << 15)
++#define PXA3xx_PM_WE_OTG (0x1UL << 16)
++#define PXA3xx_PM_WE_INTC (0x1UL << 17)
++#define PXA3xx_PM_WE_MLCD (0x1UL << 18)
++#define PXA3xx_PM_WE_USIM0 (0x1UL << 19)
++#define PXA3xx_PM_WE_USIM1 (0x1UL << 20)
++#define PXA3xx_PM_WE_MKEY (0x1UL << 21)
++#define PXA3xx_PM_WE_MUX2 (0x1UL << 22)
++#define PXA3xx_PM_WE_MUX3 (0x1UL << 23)
++#define PXA3xx_PM_WE_MSL0 (0x1UL << 24)
++#define PXA3xx_PM_WE_RESERVE1 (0x1UL << 25)
++#define PXA3xx_PM_WE_USB2 (0x1UL << 26)
++#define PXA3xx_PM_WE_DMC (0x1UL << 27)
++#define PXA3xx_PM_WE_USBH (0x1UL << 28)
++#define PXA3xx_PM_WE_TSI (0x1UL << 29)
++#define PXA3xx_PM_WE_OST (0x1UL << 30)
++#define PXA3xx_PM_WE_RTC (0x1UL << 31)
++
++
++#define PWSR_EDR0 (0x1 << 0)
++#define PWSR_EDR1 (0x1 << 1)
++#define PWSR_EDF0 (0x1 << 2)
++#define PWSR_EDF1 (0x1 << 3)
++#define PWSR_EERTC (0x1 << 31)
++
++#define PWER_WER0 (0x1 << 0)
++#define PWER_WER1 (0x1 << 1)
++#define PWER_WEF0 (0x1 << 2)
++#define PWER_WEF1 (0x1 << 3)
++#define PWER_WERTC (0x1 << 31)
++
++#define WORD_SIZE 4
++
++/* the position of each data member */
++#define SleepState_begin 0x0
++#define SleepState_checksum 0x0
++#define SleepState_wordCount (SleepState_checksum + WORD_SIZE)
++#define SleepState_areaAddress (SleepState_wordCount + WORD_SIZE)
++#define SleepState_modeSaveFlags (SleepState_areaAddress + WORD_SIZE)
++
++/* save ARM registers */
++#define SleepState_ENTRY_REGS (SleepState_modeSaveFlags + WORD_SIZE)
++#define SleepState_ENTRY_CPSR (SleepState_ENTRY_REGS)
++#define SleepState_ENTRY_SPSR (SleepState_ENTRY_CPSR + WORD_SIZE)
++#define SleepState_ENTRY_R0 (SleepState_ENTRY_SPSR + WORD_SIZE)
++#define SleepState_ENTRY_R1 (SleepState_ENTRY_R0 + WORD_SIZE)
++#define SleepState_SYS_REGS (SleepState_ENTRY_REGS + 17*WORD_SIZE)
++#define SleepState_FIQ_REGS (SleepState_SYS_REGS + 2*WORD_SIZE)
++#define SleepState_IRQ_REGS (SleepState_FIQ_REGS + 8*WORD_SIZE)
++#define SleepState_ABT_REGS (SleepState_IRQ_REGS + 3*WORD_SIZE)
++#define SleepState_UND_REGS (SleepState_ABT_REGS + 3*WORD_SIZE)
++#define SleepState_SVC_REGS (SleepState_UND_REGS + 3*WORD_SIZE)
++
++/* save MMU settings */
++#define SleepState_Cp15_ACR_MMU (SleepState_SVC_REGS + 3*WORD_SIZE)
++#define SleepState_Cp15_AUXCR_MMU (SleepState_Cp15_ACR_MMU + WORD_SIZE)
++#define SleepState_Cp15_TTBR_MMU (SleepState_Cp15_AUXCR_MMU + WORD_SIZE)
++#define SleepState_Cp15_DACR_MMU (SleepState_Cp15_TTBR_MMU + WORD_SIZE)
++#define SleepState_Cp15_PID_MMU (SleepState_Cp15_DACR_MMU + WORD_SIZE)
++#define SleepState_Cp15_CPAR (SleepState_Cp15_PID_MMU + WORD_SIZE)
++
++#define SleepState_extendedChecksumByteCount (SleepState_Cp15_CPAR + WORD_SIZE)
++#define SleepState_psprAddress (SleepState_extendedChecksumByteCount + WORD_SIZE)
++#define SleepState_flushFunc (SleepState_psprAddress + WORD_SIZE)
++#define SleepState_end (SleepState_flushFunc + WORD_SIZE)
++#define SleepState_size (SleepState_end - SleepState_begin)
++
++#ifndef __ASSEMBLY__
++
++typedef struct {
++ unsigned long value;
++ struct {
++ unsigned ext0 : 1;
++ unsigned ext1 : 1;
++ unsigned uart1 : 1;
++ unsigned uart2 : 1;
++ unsigned uart3 : 1;
++ unsigned wifi : 1; /* wifi uses UART1's pin as wakeup source */
++ unsigned mmc1_cd : 1;
++ unsigned mmc2_cd : 1;
++ unsigned mmc3_cd : 1;
++ unsigned mmc1_dat1 : 1;
++ unsigned mmc2_dat1 : 1;
++ unsigned mmc3_dat1 : 1;
++ unsigned mkey : 1;
++ unsigned usbotg : 1;
++ unsigned mlcd : 1;
++ unsigned dkey : 1;
++ unsigned usb2 : 1; /* USB 2.0 client */
++ unsigned usbh : 1; /* USB Host Port 1 */
++ unsigned msl : 1;
++ unsigned tsi : 1;
++ unsigned ost : 1;
++ unsigned rtc : 1;
++ unsigned eth : 1;
++ unsigned onkey : 1; /* pmic wakeup resources */
++ unsigned usbc : 1; /* USB client for cable and charger */
++ unsigned bat_full : 1; /* battery full */
++ unsigned bat_low : 1; /* battery low */
++ unsigned bt : 1; /* bluetooth use STRXD pin */
++ unsigned cmwdt : 1;
++ unsigned psensor : 1; /* psensor */
++ } bits;
++} pm_wakeup_src_t;
++
++
++#ifdef __KERNEL__
++struct intc_regs {
++ unsigned int iccr;
++ unsigned int ipr[32];
++ unsigned int ipr2[21];
++ unsigned int icmr;
++ unsigned int icmr2;
++ unsigned int iclr;
++ unsigned int iclr2;
++};
++
++struct clock_regs {
++ unsigned int aicsr;
++ unsigned int ckena;
++ unsigned int ckenb;
++ unsigned int oscc;
++};
++
++struct ost_regs {
++ unsigned int ossr;
++ unsigned int oier;
++ unsigned int oscr;
++ unsigned int oscr4;
++ unsigned int osmr4;
++ unsigned int omcr4;
++};
++
++struct rtc_regs {
++ unsigned int rtsr;
++ unsigned int piar;
++};
++
++struct smc_regs {
++ unsigned char __iomem *membase;
++ unsigned int msc0;
++ unsigned int msc1;
++ unsigned int mecr;
++ unsigned int sxcnfg;
++ unsigned int mcmem0;
++ unsigned int mcatt0;
++ unsigned int mcio0;
++ unsigned int memclkcfg;
++ unsigned int cscfg0;
++ unsigned int cscfg1;
++ unsigned int cscfg2;
++ unsigned int cscfg3;
++ unsigned int cscfg_p;
++ unsigned int csmscfg;
++};
++
++struct arb_regs {
++ unsigned char __iomem *membase;
++ unsigned int ctl1;
++ unsigned int ctl2;
++};
++
++struct pmu_regs {
++ unsigned int pcfr;
++ unsigned int pecr;
++ unsigned int pvcr;
++};
++
++#define MAX_MFP_PINS 419
++
++struct mfp_regs {
++ unsigned int mfp[MAX_MFP_PINS];
++};
++
++struct gpio_regs {
++ unsigned int gplr0;
++ unsigned int gplr1;
++ unsigned int gplr2;
++ unsigned int gplr3;
++ unsigned int gpdr0;
++ unsigned int gpdr1;
++ unsigned int gpdr2;
++ unsigned int gpdr3;
++ unsigned int grer0;
++ unsigned int grer1;
++ unsigned int grer2;
++ unsigned int grer3;
++ unsigned int gfer0;
++ unsigned int gfer1;
++ unsigned int gfer2;
++ unsigned int gfer3;
++};
++
++struct pm_save_data {
++ u32 checksum;
++ u32 wordCount;
++ u32 areaAddress;
++ u32 modeSaveFlags;
++ /* current mode registers cpsr, spsr, r0-r12, lr, sp */
++ u32 ENTRY_REGS[17];
++ /* SYS mode registers:sp, lr */
++ u32 SYS_REGS[2];
++ /* FIQ mode registers:spsr, r8-r12, sp, lr */
++ u32 FIQ_REGS[8];
++ /* IRQ mode registers:spsr, sp, lr */
++ u32 IRQ_REGS[3];
++ /* ABT mode registers:spsr, sp, lr */
++ u32 ABT_REGS[3];
++ /* UND mode registers:spsr, sp, lr */
++ u32 UND_REGS[3];
++ /* SVC mode registers:spsr, sp, lr */
++ u32 SVC_REGS[3];
++ /* MMU registers */
++ u32 CP15_ACR_MMU;
++ u32 CP15_AUXCR_MMU;
++ u32 CP15_TTBR_MMU;
++ u32 CP15_DACR_MMU;
++ u32 CP15_PID_MMU;
++ u32 CP15_CPAR;
++
++ u32 extendedChecksumByteCount;
++ u32 psprAddress;
++ void (*flushFunc)(void);
++ /* the parameter is the reserved bytes from 0x5c010000 */
++ /* It returns the physical address of initialization code in SRAM */
++};
++
++struct pxa3xx_pm_regs {
++ /* It's used to save core registers. */
++ struct pm_save_data pm_data;
++ struct mfp_regs mfp;
++ struct gpio_regs gpio;
++ struct intc_regs intc;
++ struct clock_regs clock;
++ struct ost_regs ost;
++ struct rtc_regs rtc;
++ struct smc_regs smc;
++ struct arb_regs arb;
++ struct pmu_regs pmu;
++ /* It's the virtual address of ISRAM that can be accessed by kernel.
++ */
++ void *sram_map;
++ /* It's used to save ISRAM data. */
++ void *sram;
++ /* It's used to save OBM that loaded from NAND flash. */
++ void *obm;
++ /* It's the address of DDR that stores key information.
++ * Two words are used from the address.
++ */
++ void *data_pool;
++ unsigned int word0;
++ unsigned int word1;
++};
++
++extern pm_wakeup_src_t wakeup_src;
++
++struct pxa3xx_peripheral_wakeup_ops {
++ int (*init)(pm_wakeup_src_t *src);
++ int (*query)(unsigned int reg, pm_wakeup_src_t *src);
++ int (*ext)(pm_wakeup_src_t src, int enable);
++ int (*key)(pm_wakeup_src_t src, int enable);
++ int (*mmc)(pm_wakeup_src_t src, int enable);
++ int (*uart)(pm_wakeup_src_t src, int enable);
++ int (*eth)(pm_wakeup_src_t src, int enable);
++ int (*tsi)(pm_wakeup_src_t src, int enable);
++ int (*usb)(pm_wakeup_src_t src, int enable);
++ int (*cmwdt)(pm_wakeup_src_t src, int enable);
++ int (*psensor)(pm_wakeup_src_t src, int enable);
++};
++
++extern int pxa3xx_wakeup_register(struct pxa3xx_peripheral_wakeup_ops *);
++extern void pxa3xx_lock_suspend(void);
++extern void pxa3xx_unlock_suspend(void);
++extern void pxa3xx_lock_suspend_cancel(void);
++#endif
++#endif
++
++#endif
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_pmic.h kernel/arch/arm/mach-pxa/include/mach/pxa3xx_pmic.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/pxa3xx_pmic.h 2009-12-13 12:59:52.402368878 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/pxa3xx_pmic.h 2009-12-12 16:09:26.462949527 +0200
+@@ -0,0 +1,194 @@
++#ifndef __PMIC_H__
++#define __PMIC_H__
++
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++
++/* this enum should be consistent with micco_power_module[]
++ * in arch/arm/mach-pxa/xxx(platform).c */
++enum {
++ /* Set command > 0xFFFF0000 to avoid a wrong
++ * parameter being used for pmic_set_voltage.
++ */
++ VCC_CORE = 0xFFFF0000,
++ VCC_SRAM,
++ VCC_MVT,
++ VCC_3V_APPS,
++ VCC_SDIO,
++ VCC_CAMERA_ANA,
++ VCC_USB,
++ VCC_LCD,
++ VCC_TSI,
++ VCC_CAMERA_IO,
++ VCC_1P8V,
++ VCC_MEM,
++ HDMI_TX,
++ TECH_3V,
++ TECH_1P8V,
++
++ /* add your command here */
++ VCC_BT,
++ VCC_JOGBALL,
++ VCC_BTWIFFSHARE,
++ VCC_LCD_IO,
++ VCC_TOUCHKEY,
++ /* max command */
++ MAX_CMD,
++};
++
++enum {
++ LED_BACKLIGHT = 0xFFFF0000,
++ LED_VIBRATOR,
++ LED_FLASH,
++ LED_GREEN,
++ LED_RED,
++ LED_BLUE,
++ LED_KEYPAD,
++ LED_MAX_CMD,
++};
++
++#define PMIC_EVENT_EXTON (1 << 0)
++#define PMIC_EVENT_VBUS (1 << 1)
++#define PMIC_EVENT_USB (PMIC_EVENT_EXTON | PMIC_EVENT_VBUS)
++
++#define PMIC_EVENT_TOUCH (1 << 2)
++
++#define PMIC_EVENT_OTGCP_IOVER (1 << 3)
++
++#define PMIC_EVENT_TBAT (1 << 4)
++#define PMIC_EVENT_REV_IOVER (1 << 5)
++#define PMIC_EVENT_IOVER (1 << 6)
++#define PMIC_EVENT_CHDET (1 << 7)
++#define PMIC_EVENT_VBATMON (1 << 8)
++#define PMIC_EVENT_ONKEY (1 << 9)
++
++#ifdef CONFIG_MICCO_HEADSET_DETECT
++#define PMIC_EVENT_HEADSET (1 << 10)
++#define PMIC_EVENT_HOOKSWITCH (1 << 11)
++#endif
++#define PMIC_EVENT_CH_CCTO (1 << 12)
++#define PMIC_EVENT_CH_TCTO (1 << 13)
++
++#define PMIC_EVENT_CHARGER (PMIC_EVENT_TBAT | \
++ PMIC_EVENT_REV_IOVER | \
++ PMIC_EVENT_IOVER | \
++ PMIC_EVENT_CHDET | \
++ PMIC_EVENT_CH_CCTO | \
++ PMIC_EVENT_CH_TCTO | \
++ PMIC_EVENT_VBATMON)
++
++
++#define PMIC_EVENT_USB_IN (1 << 0)
++#define PMIC_EVENT_AC_IN (1 << 1)
++#define PMIC_EVENT_CABLE_OUT (1 << 2)
++#define PMIC_EVENT_CABLE_DETECT (PMIC_EVENT_USB_IN | \
++ PMIC_EVENT_AC_IN | \
++ PMIC_EVENT_CABLE_OUT)
++
++struct pmic_ops {
++ int (*get_voltage)(int cmd, int *pmv);
++ int (*set_voltage)(int cmd, int mv);
++ int (*enable_voltage)(int cmd, int enable);
++ int (*check_voltage)(int cmd);
++ int (*enable_led)(int cmd, int enable);
++ int (*enable_event)(unsigned long, int enable);
++ int (*is_vbus_assert)(void);
++ int (*is_avbusvld)(void);
++ int (*is_asessvld)(void);
++ int (*is_bsessvld)(void);
++ int (*is_srp_ready)(void);
++
++ int (*set_pump)(int enable);
++ int (*set_vbus_supply)(int enable, int srp);
++ int (*set_usbotg_a_mask)(void);
++ int (*set_usbotg_b_mask)(void);
++ int (*is_usbpump_chg)(void);
++
++ int (*is_onkey_assert)(void);
++ int (*is_hookswitch_assert)(void);
++ int (*init)(struct device *dev);
++ int (*deinit)(struct device *dev);
++
++ struct list_head list; /* callback list */
++ spinlock_t cb_lock; /* spinlock for callback list */
++};
++
++struct pmic_callback {
++ unsigned long event; /* the event which the callback cares about */
++ void (*func)(unsigned long event); /*callback function */
++ struct list_head list;
++};
++
++struct pxa3xx_pmic_regs {
++ unsigned int data:8;
++ unsigned int hit:1;
++ unsigned int mask:1;
++ unsigned int inited:1;
++ unsigned int cacheable:1;
++};
++
++extern void start_calc_time(void);
++extern void end_calc_time(void);
++
++extern int pxa3xx_pmic_write(u8 reg, u8 val);
++extern int pxa3xx_pmic_read(u8 reg, u8 *pval);
++
++extern void pmic_set_ops(struct pmic_ops *ops);
++
++extern int pmic_callback_register(unsigned long event,
++ void (*func)(unsigned long event));
++extern int pmic_callback_unregister(unsigned long event,
++ void (*func)(unsigned long event));
++
++extern int pmic_event_handle(unsigned long event);
++
++extern int pxa3xx_pmic_get_voltage(int cmd, int *pval);
++extern int pxa3xx_pmic_set_voltage(int cmd, int val);
++
++extern int pxa3xx_pmic_check_voltage(int cmd);
++extern int pxa3xx_pmic_enable_voltage(int cmd, int enable);
++extern int pxa3xx_pmic_enable_led(int cmd, int enable);
++/* Check whether USB VBUS is asserted */
++extern int pxa3xx_pmic_is_vbus_assert(void);
++/* Check whether USB VBUS has gone above A-device VBUS valid threshold
++ * Min: 4.4V Max: N/A
++ */
++extern int pxa3xx_pmic_is_avbusvld(void);
++/* Check whether VBUS has gone above A-device Session Valid threshold
++ * Min: 0.8V Max: 2.0V
++ */
++extern int pxa3xx_pmic_is_asessvld(void);
++/* Check whether VBUS has gone above B-device Session Valid threshold
++ * Min: 0.8V Max: 4.0V
++ */
++extern int pxa3xx_pmic_is_bsessvld(void);
++/* Check whether VBUS has gone above B-device Session End threshold
++ * Min: 0.2V Max: 0.8V
++ */
++extern int pxa3xx_pmic_is_srp_ready(void);
++/* Initialize the USB PUMP */
++extern int pxa3xx_pmic_set_pump(int enable);
++/* Check the events change of PMIC */
++extern int pxa3xx_pmic_event_change(void);
++/* enable/disable VBUS supply */
++extern int pxa3xx_pmic_set_vbus_supply(int enable, int srp);
++/* Set events mask for USB A-device
++ * A-device Session Valid event
++ * A-device VBUS Valid event
++ */
++extern int pxa3xx_pmic_set_usbotg_a_mask(void);
++/* Set events mask for USB B-device
++ * B-device Session Valid event
++ * B-device Session end event
++ */
++extern int pxa3xx_pmic_set_usbotg_b_mask(void);
++
++extern int pxa3xx_pmic_is_onkey_assert(void);
++
++extern int pxa3xx_pmic_is_hookswitch_assert(void);
++
++extern int px3xx_pmic_event_enable(unsigned long event, int enable);
++
++
++#endif
++
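A sketch of the notification interface declared above: a consumer registers a callback for the cable/VBUS events and polls one PMIC register over I2C. The register number 0x00 is a placeholder and pr_info() is used only for illustration.

    /* Hypothetical PMIC event consumer; register 0x00 is a placeholder. */
    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <mach/pxa3xx_pmic.h>

    static void example_vbus_event(unsigned long event)
    {
            if (event & PMIC_EVENT_VBUS)
                    pr_info("VBUS asserted: %d\n", pxa3xx_pmic_is_vbus_assert());
    }

    static int __init example_vbus_watch_init(void)
    {
            u8 val;

            /* be notified on EXTON/VBUS changes */
            pmic_callback_register(PMIC_EVENT_USB, example_vbus_event);

            /* read one PMIC register over I2C (register number is a placeholder) */
            if (pxa3xx_pmic_read(0x00, &val) == 0)
                    pr_info("pmic reg 0x00 = 0x%02x\n", val);
            return 0;
    }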
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/sgh_msm6k.h kernel/arch/arm/mach-pxa/include/mach/sgh_msm6k.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/sgh_msm6k.h 2009-12-13 12:59:59.879036795 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/sgh_msm6k.h 2009-12-12 16:09:26.466285980 +0200
+@@ -0,0 +1,35 @@
++#ifndef __SGH_MSM6K__
++#define __SGH_MSM6K__
++
++#define CH_RPC 0
++
++enum RPC_PKT_READ_TYPE {
++ RPC_INDICATOR=1,
++ RPC_RESPONSE,
++ RPC_NOTIFICATION,
++};
++
++enum RPC_PKT_WRITE_TYPE {
++ RPC_EXECUTE=1,
++ RPC_GET,
++ RPC_SET,
++ RPC_CFRM,
++};
++
++#define RPC_GSM_CALL_INCOMING 0x0202
++#define RPC_GSM_CALL_STATUS 0x0205
++
++#define RPC_GSM_SEC_PIN_STATUS 0x0501
++#define RPC_GSM_SEC_PHONE_LOCK 0x0502
++#define RPC_GSM_SEC_LOCK_INFOMATION 0x0508
++
++#define RPC_GSM_SS_INFO 0x0c06
++
++extern void smd_init(void);
++extern int smd_read(int ch, void* buf, int len);
++extern int smd_write(int ch, void *_buf, int len);
++extern void smd_phone_power(int on);
++
++extern void rpc_init(void);
++
++#endif
+diff -ur linux-2.6.32/arch/arm/mach-pxa/include/mach/xscale-pmu.h kernel/arch/arm/mach-pxa/include/mach/xscale-pmu.h
+--- linux-2.6.32/arch/arm/mach-pxa/include/mach/xscale-pmu.h 2009-12-13 13:00:05.321944499 +0200
++++ kernel/arch/arm/mach-pxa/include/mach/xscale-pmu.h 2009-12-12 16:09:26.469613568 +0200
+@@ -0,0 +1,66 @@
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ *
++ * (C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#ifndef _XSCALE_PMU_H_
++#define _XSCALE_PMU_H_
++
++#include <linux/types.h>
++
++/*
++ * Different types of events that can be counted by the XScale PMU
++ */
++#define EVT_ICACHE_MISS 0x00
++#define EVT_ICACHE_NO_DELIVER 0x01
++#define EVT_DATA_STALL 0x02
++#define EVT_ITLB_MISS 0x03
++#define EVT_DTLB_MISS 0x04
++#define EVT_BRANCH 0x05
++#define EVT_BRANCH_MISS 0x06
++#define EVT_INSTRUCTION 0x07
++#define EVT_DCACHE_FULL_STALL 0x08
++#define EVT_DCACHE_FULL_STALL_CONTIG 0x09
++#define EVT_DCACHE_ACCESS 0x0A
++#define EVT_DCACHE_MISS 0x0B
++#define EVT_DCACE_WRITE_BACK 0x0C
++#define EVT_PC_CHANGED 0x0D
++#define EVT_BCU_REQUEST 0x10
++#define EVT_BCU_FULL 0x11
++#define EVT_BCU_DRAIN 0x12
++#define EVT_BCU_ECC_NO_ELOG 0x14
++#define EVT_BCU_1_BIT_ERR 0x15
++#define EVT_RMW 0x16
++
++struct pmu_results
++{
++ u32 ccnt_of;
++ u32 ccnt; /* Clock Counter Register */
++ u32 pmn0_of;
++ u32 pmn0; /* Performance Counter Register 0 */
++ u32 pmn1_of;
++ u32 pmn1; /* Performance Counter Register 1 */
++ u32 pmn2_of;
++ u32 pmn2; /* Performance Counter Register 2 */
++ u32 pmn3_of;
++ u32 pmn3; /* Performance Counter Register 3 */
++};
++
++#ifdef __KERNEL__
++
++extern struct pmu_results results;
++
++int pmu_claim(void); /* Claim PMU for usage */
++int pmu_start(u32, u32, u32, u32); /* Start PMU execution */
++int pmu_stop(struct pmu_results *); /* Stop perfmon unit */
++int pmu_release(int); /* Release PMU */
++#endif
++
++#endif /* _XSCALE_PMU_H_ */
++
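The older XScale-style interface above is compact enough for a usage sketch; reading pmu_start()'s four arguments as the event selections for PMN0..PMN3 is an assumption, as is passing the pmu_claim() identifier back to pmu_release().

    /* Sketch of the legacy XScale PMU API; the meaning of pmu_start()'s four
     * arguments (events for PMN0..PMN3) is assumed. */
    #include <mach/xscale-pmu.h>

    static void example_count_caches(void)
    {
            struct pmu_results res;
            int id;

            id = pmu_claim();                     /* claim the PMU for this user */
            if (id < 0)
                    return;

            pmu_start(EVT_DCACHE_ACCESS, EVT_DCACHE_MISS,
                      EVT_ICACHE_MISS, EVT_BRANCH_MISS);

            /* ... workload ... */

            pmu_stop(&res);                       /* res.pmn0 .. res.pmn3 hold the counts */
            pmu_release(id);
    }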
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pmu.c kernel/arch/arm/mach-pxa/pmu.c
+--- linux-2.6.32/arch/arm/mach-pxa/pmu.c 2009-12-13 13:00:12.875282685 +0200
++++ kernel/arch/arm/mach-pxa/pmu.c 2009-12-12 16:09:26.479614367 +0200
+@@ -0,0 +1,183 @@
++/*
++ * "This software program is available to you under a choice of one of two
++ * licenses. You may choose to be licensed under either the GNU General Public
++ * License (GPL) Version 2, June 1991, available at
++ * http://www.fsf.org/copyleft/gpl.html, or the BSD License, the text of
++ * which follows:
++ *
++ * Copyright (c) 1996-2005, Intel Corporation. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * Redistributions of source code must retain the above copyright notice, this
++ * list of conditions and the following disclaimer.
++ *
++ * Redistributions in binary form must reproduce the above copyright notice, this
++ * list of conditions and the following disclaimer in the documentation and/or
++ * other materials provided with the distribution.
++ *
++ * Neither the name of the Intel Corporation ("Intel") nor the names of its
++ * contributors may be used to endorse or promote products derived from this
++ * software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
++ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
++ */
++
++/*
++ * FILENAME: pmu.c
++ *
++ * CORE STEPPING:
++ *
++ * PURPOSE: contains all PMU C functions.
++ *
++ * (C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <mach/pmu.h>
++#include <asm/types.h>
++#include <mach/pxa3xx-regs.h>
++#include <mach/hardware.h>
++#include <asm/io.h>
++
++static struct pxa3xx_pmu_info *pmu_info;
++
++/*
++ * Select one event (a PMU or PML event) for a PMU counter
++ *
++ * @par
++ * This function selects one event, either a Manzano (PMU) event or a
++ * Monahans (PML) event. When type is a Monahans PML event, it is the
++ * Monahans PML event number ORed with PXA3xx_EVENT_MASK. In other words,
++ * when type is a Manzano event, bit 31 is zero; when type is a Monahans
++ * PML event, bit 31 is one.
++ * @par
++ * We only use the first four Monahans PML event selectors because Manzano
++ * has only 4 counters and every selector can choose all PML events.
++ * We use a 1:1 map from PMU counter to PML selector, so counter 0 uses
++ * PML_SEL0, counter 1 uses PML_SEL1 and so on.
++ * @param
++ * counter PMU counter number. It must be between 0 and 3
++ * type PMU or PML event type
++ * @return
++ * old event type before calling this function.
++ * @remarks
++ * requires kernel/supervisor mode
++ */
++int pmu_select_event(int counter, int type)
++{
++ u32 oldevent, value, pmuevent, shift;
++
++ if(counter < 0 || counter > 3) {
++ return PMU_EVENT_INVALIDATE;
++ }
++ shift = counter * 8;
++
++ value = pmu_read_reg((u32)PMU_EVTSEL);
++ pmuevent = (value >> shift) & 0xFF;
++
++ if (pmuevent >= PMU_EVENT_ASSP_0 && pmuevent <= PMU_EVENT_ASSP_3) {
++ oldevent = PXA3xx_EVENT_MASK |
++ (*(pmu_info->pmu_base + (counter << 2)));
++ } else {
++ oldevent = pmuevent;
++ }
++
++ if(type & PXA3xx_EVENT_MASK) {
++ /* PML Event */
++ value &= ~(0xFF << shift);
++ value |= (PMU_EVENT_ASSP_0 + counter) << shift;
++ *(pmu_info->pmu_base + (counter << 2)) =
++ type & (~PXA3xx_EVENT_MASK);
++ } else {
++ /* PMU Event */
++ value &= ~(0xFF << shift);
++ value |= (type & 0xFF) << shift;
++ }
++ pmu_write_reg((u32)PMU_EVTSEL, value);
++
++ return oldevent;
++}
++
++#ifdef CONFIG_PM
++static int pxa3xx_pmu_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ return 0;
++}
++
++static int pxa3xx_pmu_resume(struct platform_device *pdev)
++{
++ return 0;
++}
++#else
++#define pxa3xx_pmu_suspend NULL
++#define pxa3xx_pmu_resume NULL
++#endif
++
++static int __init pxa3xx_pmu_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++
++ pmu_info = kzalloc(sizeof(struct pxa3xx_pmu_info), GFP_KERNEL);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu_regs");
++ if (!res) goto err;
++ pmu_info->pmu_base = ioremap(res->start, res->end - res->start + 1);
++ return 0;
++err:
++ printk("pxa3xx PMU init failed\n");
++ return -EIO;
++}
++
++static int pxa3xx_pmu_remove(struct platform_device *pdev)
++{
++ struct resource *res;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu_regs");
++ if (!res) goto err;
++ iounmap(pmu_info->pmu_base);
++ kfree(pmu_info);
++ return 0;
++err:
++ printk("pxa3xx PMU remove failed\n");
++ return -EIO;
++}
++
++static struct platform_driver pxa3xx_pmu_driver = {
++ .driver = {
++ .name = "pxa3xx-pmu",
++ },
++ .probe = pxa3xx_pmu_probe,
++ .remove = pxa3xx_pmu_remove,
++#ifdef CONFIG_PM
++ .suspend = pxa3xx_pmu_suspend,
++ .resume = pxa3xx_pmu_resume,
++#endif
++};
++
++static int __init pxa3xx_pmu_init(void)
++{
++ return platform_driver_register(&pxa3xx_pmu_driver);
++}
++
++static void __exit pxa3xx_pmu_exit(void)
++{
++ platform_driver_unregister(&pxa3xx_pmu_driver);
++}
++
++module_init(pxa3xx_pmu_init);
++module_exit(pxa3xx_pmu_exit);
++
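To make the PMU/PML split described in the pmu_select_event() comment concrete: an event with bit 31 clear programs the Manzano EVTSEL field directly, while an event carrying PXA3xx_EVENT_MASK is routed through the PML selector matching the counter. A minimal sketch:

    /* Counter 0 counts a Manzano (PMU) event, counter 1 a Monahans (PML)
     * event recognised by bit 31 (PXA3xx_EVENT_MASK) being set. */
    #include <mach/pmu.h>

    static void example_select_mixed_events(void)
    {
            int old0, old1;

            old0 = pmu_select_event(0, PMU_EVENT_L1_DATA_MISS);   /* bit 31 clear */
            old1 = pmu_select_event(1, PXA3xx_EVENT_CORE_READ);   /* PML, via PML_SEL1 */

            (void)old0;   /* previous selections could be restored later */
            (void)old1;
    }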
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pmu_ll.S kernel/arch/arm/mach-pxa/pmu_ll.S
+--- linux-2.6.32/arch/arm/mach-pxa/pmu_ll.S 2009-12-13 13:00:17.648612716 +0200
++++ kernel/arch/arm/mach-pxa/pmu_ll.S 2009-12-12 16:09:26.479614367 +0200
+@@ -0,0 +1,204 @@
++@ "This software program is available to you under a choice of one of two
++@ licenses. You may choose to be licensed under either the GNU General Public
++@ License (GPL) Version 2, June 1991, available at
++@ http://www.fsf.org/copyleft/gpl.html, or the BSD License, the text of
++@ which follows:
++@
++@ Copyright (c) 1996-2005, Intel Corporation. All rights reserved.
++@
++@ Redistribution and use in source and binary forms, with or without
++@ modification, are permitted provided that the following conditions are met:
++@
++@ Redistributions of source code must retain the above copyright notice, this
++@ list of conditions and the following disclaimer.
++@
++@ Redistributions in binary form must reproduce the above copyright notice, this
++@ list of conditions and the following disclaimer in the documentation and/or
++@ other materials provided with the distribution.
++@
++@ Neither the name of the Intel Corporation ("Intel") nor the names of its
++@ contributors may be used to endorse or promote products derived from this
++@ software without specific prior written permission.
++@
++@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++@ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++@ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++@ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
++@ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++@ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
++@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
++@
++@ FILENAME: pmu_ll.S
++@
++@ PURPOSE: Provides low level PMU primitive functions written specifically for
++@ the Bulverde/Mainstone processor/platform. Specially designed to fit
++@ into the Intel VTune architecture
++@
++@
++@ LAST MODIFIED: 10/31/02
++@******************************************************************************
++@
++@
++@ List of primitive functions in this source code include:
++@
++ .global pmu_read_reg
++ .global pmu_write_reg
++
++ .text
++
++@
++@ pmu_read_reg - Read the PMU Register
++@
++@ Description:
++@ This routine reads the designated PMU register via CoProcessor 14.
++@
++@ Input Parameters:
++@ r0 - arg1, PMU register number to read. Number between 0 and 8
++@ if r0 contains:
++@ 0 -> PMNC, PMU Control Register
++@ 1 -> CCNT, PMU Clock Counter
++@ 2 -> PMN0, PMU Count Register 0
++@ 3 -> PMN1, PMU Count Register 1
++@ 4 -> PMN2, PMU Count Register 2
++@ 5 -> PMN3, PMU Count Register 3
++@ 6 -> INTEN, PMU Interrupt Enable Register
++@ 7 -> FLAG, PMU Overflow Flag Status Register
++@ 8 -> EVTSEL PMU Event Select Register
++@
++@ Returns:
++@ r0 - 32-bit value read from CoProcessor
++@
++@ Registers Modified:
++@ CoProcessor Register Modified: None
++@ General Purpose Registers Modified: r0
++@
++@ NOTE:
++@ Currently does not support THUMB mode
++@ Error checking not included
++
++pmu_read_reg:
++
++ cmp r0, #8
++ addls pc, pc, r0, lsl #2
++ b RRet
++ b RdPMNC
++ b RdCCNT
++ b RdPMN0
++ b RdPMN1
++ b RdPMN2
++ b RdPMN3
++ b RdINTEN
++ b RdFLAG
++ b RdEVTSEL
++
++RdPMNC:
++ mrc p14, 0, r0, c0, c1, 0 @ Read PMNC
++ b RRet
++RdCCNT:
++ mrc p14, 0, r0, c1, c1, 0 @ Read CCNT
++ b RRet
++RdPMN0:
++ mrc p14, 0, r0, c0, c2, 0 @ Read PMN0
++ b RRet
++RdPMN1:
++ mrc p14, 0, r0, c1, c2, 0 @ Read PMN1
++ b RRet
++RdPMN2:
++ mrc p14, 0, r0, c2, c2, 0 @ Read PMN2
++ b RRet
++RdPMN3:
++ mrc p14, 0, r0, c3, c2, 0 @ Read PMN3
++ b RRet
++RdINTEN:
++ mrc p14, 0, r0, c4, c1, 0 @ Read INTEN
++ b RRet
++RdFLAG:
++ mrc p14, 0, r0, c5, c1, 0 @ Read FLAG
++ b RRet
++RdEVTSEL:
++ mrc p14, 0, r0, c8, c1, 0 @ Read EVTSEL
++
++RRet:
++ mov pc, lr @ return
++
++
++@
++@ pmu_write_reg - Writes to the PMU Register
++@
++@ Description:
++@ This routine writes to the designated PMU register via CoProcessor 14.
++@
++@ Input Parameters:
++@ r0 - arg1 - PMU register number to write
++@ r1 - arg2 - Value to write to PMU register
++@
++@ if r0 contains:
++@ 0 -> PMNC, PMU Control Register
++@ 1 -> CCNT, PMU Clock Counter
++@ 2 -> PMN0, PMU Count Register 0
++@ 3 -> PMN1, PMU Count Register 1
++@ 4 -> PMN2, PMU Count Register 2
++@ 5 -> PMN3, PMU Count Register 3
++@ 6 -> INTEN, PMU Interrupt Enable Register
++@ 7 -> FLAG, PMU Overflow Flag Status Register
++@ 8 -> EVTSEL PMU Event Select Register
++@
++@ Returns:
++@ None
++@
++@ Registers Modified:
++@ CoProcessor Register Modified: PMU Register
++@ General Purpose Registers Modified: None
++@
++@NOTE:
++@ Currently does not support THUMB mode
++@ Error checking not included
++
++pmu_write_reg:
++
++ cmp r0, #8
++ addls pc, pc, r0, lsl #2
++ b WRet
++ b WrPMNC
++ b WrCCNT
++ b WrPMN0
++ b WrPMN1
++ b WrPMN2
++ b WrPMN3
++ b WrINTEN
++ b WrFLAG
++ b WrEVTSEL
++
++WrPMNC:
++ mcr p14, 0, r1, c0, c1, 0 @ Write PMNC
++ b WRet
++WrCCNT:
++ mcr p14, 0, r1, c1, c1, 0 @ Write CCNT
++ b WRet
++WrPMN0:
++ mcr p14, 0, r1, c0, c2, 0 @ Write PMN0
++ b WRet
++WrPMN1:
++ mcr p14, 0, r1, c1, c2, 0 @ Write PMN1
++ b WRet
++WrPMN2:
++ mcr p14, 0, r1, c2, c2, 0 @ Write PMN2
++ b WRet
++WrPMN3:
++ mcr p14, 0, r1, c3, c2, 0 @ Write PMN3
++ b WRet
++WrINTEN:
++ mcr p14, 0, r1, c4, c1, 0 @ Write INTEN
++ b WRet
++WrFLAG:
++ mcr p14, 0, r1, c5, c1, 0 @ Write FLAG
++ b WRet
++WrEVTSEL:
++ mcr p14, 0, r1, c8, c1, 0 @ Write EVTSEL
++
++WRet:
++ mov pc, lr @ return
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/prm.c kernel/arch/arm/mach-pxa/prm.c
+--- linux-2.6.32/arch/arm/mach-pxa/prm.c 2009-12-13 13:00:22.645696759 +0200
++++ kernel/arch/arm/mach-pxa/prm.c 2009-12-12 16:09:26.479614367 +0200
+@@ -0,0 +1,1266 @@
++/*
++ * Monahans Profiler Resource Manager
++ *
++ * Copyright (C) 2004, Intel Corporation(chao.xie@intel.com).
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++ *(C) Copyright 2006 Marvell International Ltd.
++ * All Rights Reserved
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <asm/current.h>
++#include <linux/proc_fs.h>
++#include <linux/rwsem.h>
++#include <linux/interrupt.h>
++#include <mach/prm.h>
++#include <mach/pmu.h>
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_dvfm.h>
++
++/*#define DEBUG
++ */
++
++#ifdef DEBUG
++#define DPRINTK(fmt,args...) \
++ do { printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args); } while (0)
++#else
++#define DPRINTK(fmt,args...) do {} while (0)
++#endif
++
++#define ASSERT_PRIORITY(pri) \
++ if ((pri) < PRI_LOWEST || (pri) > PRI_HIGHEST) \
++ return -EINVAL;
++#define ASSERT_CLIENT_ID(client) \
++ if ((client) < 0 || (client) > MAX_CLIENTS) \
++ return -EINVAL;
++#define ASSERT_RESOURCE_ID(resource) \
++ if ((resource) < 0 || (resource) > RESOURCE_NUM) \
++ return -EINVAL;
++#define ASSERT_GROUP_ID(group) \
++ if ((group) < 0 || (group) > MAX_GROUPS) \
++ return -EINVAL;
++
++#define IS_PRM_RESOURCE(reg) ((reg) >= PMU_CCNT && (reg) <= PMU_PMN3)
++#define PMU_PRM(reg) (reg - 1)
++
++#define IS_HIGHER_PRIORITY(h1, h2) ((h1) > (h2))
++#define for_each_lower_priority(index, pri) \
++ for (index = pri - 1; index >= PRI_LOWEST; index = index - 1)
++
++#define STATE_UNDEF 0x1
++#define STATE_ACTIVE 0x2
++#define STATE_APPROPRIATED 0x3
++
++static struct prm_resource prm_resources[RESOURCE_NUM];
++static struct prm_client *prm_clients[MAX_CLIENTS];
++static struct prm_client *prm_pmu_client;
++
++struct rw_semaphore prm_sem;
++
++#ifdef DEBUG
++static struct proc_dir_entry *clients_root;
++static struct proc_dir_entry *resources_root;
++static struct proc_dir_entry *prm_root;
++
++#define proc_dump_end(len, page, start, off, count, eof) \
++do { \
++ if (len <= off + count) *eof = 1; \
++ *start = page + off; \
++ len -= off; \
++ if (len > count) len = count; \
++ if (len < 0) len = 0; \
++} while (0)
++
++static int dump_group(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ char *buf = page;
++ int len;
++ struct prm_group *group = (struct prm_group *)data;
++
++ buf += sprintf(buf, "address: 0x%x\n member_cnt: %u\n"
++ " appropriated_cnt: %u\n",
++ (unsigned int)group, group->member_cnt,
++ group->appropriated_cnt);
++ len = buf - page;
++ proc_dump_end(len, page, start, off, count, eof);
++ return len;
++}
++
++static int dump_resource_state(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int client_id = -1, group_id = -1;
++ int i, len;
++ char *buf = page;
++ struct prm_resource_state *state = (struct prm_resource_state *)data;
++
++ if (state->allocate) {
++ for (i = 0; i < MAX_CLIENTS; i++) {
++ if (prm_clients[i] && prm_clients[i] == state->allocate) {
++ client_id = i;
++ break;
++ }
++ }
++ for (i = 0; i < MAX_GROUPS; i++) {
++ if (state->allocate->groups[i] &&
++ state->allocate->groups[i] == state->group) {
++ group_id = i;
++ break;
++ }
++ }
++ }
++ buf += sprintf(buf, "allocate: 0x%x(%d)\n group: 0x%x(%d)\n"
++ " active: %u\n resource: 0x%x\n",
++ (unsigned int)state->allocate, client_id,
++ (unsigned int)state->group,
++ group_id, state->active, (unsigned int)state->resource);
++ len = buf - page;
++ proc_dump_end(len, page, start, off, count, eof);
++ return len;
++}
++
++static int dump_client(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int i, len;
++ char *buf = page;
++ struct prm_client *client = (struct prm_client *)data;
++
++ buf += sprintf(buf, "address: 0x%x\n id: %u\n pid: %u\n"
++ " priority: %u\n name: %s\n group_cnt: %u\n",
++ (unsigned int)client, client->id, client->pid,
++ client->priority, client->name, client->group_cnt);
++ for (i = 0 ;i < MAX_GROUPS; i++) {
++ if (client->groups[i])
++ buf += sprintf(buf, "group%u address: 0x%x\n",
++ i, (unsigned int)client->groups[i]);
++ }
++ len = buf - page;
++ proc_dump_end(len, page, start, off, count, eof);
++ return len;
++}
++
++static int dump_resource(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int i, client_id = -1, len;
++ char *buf = page;
++ struct prm_resource *resource = (struct prm_resource *)data;
++
++ for (i = 0; i < MAX_CLIENTS; i++) {
++ if (prm_clients[i] && prm_clients[i] == resource->access) {
++ client_id = i;
++ break;
++ }
++ }
++ buf += sprintf(buf, " address:0x%x\n access: 0x%x(%d)\n id: %u\n",
++ (unsigned int)resource, (unsigned int)resource->access,
++ client_id, resource->id);
++ len = buf - page;
++ proc_dump_end(len, page, start, off, count, eof);
++ return len;
++}
++
++static void proc_add_client(struct prm_client *client)
++{
++ char buf[16];
++
++ sprintf(buf, "client%d", client->id);
++ client->dir = proc_mkdir(buf, clients_root);
++ create_proc_read_entry("info", 0, client->dir, dump_client, client);
++}
++
++static void proc_del_client(struct prm_client *client)
++{
++ char buf[16];
++
++ remove_proc_entry("info", client->dir);
++ sprintf(buf, "client%d", client->id);
++ remove_proc_entry(buf, clients_root);
++}
++
++static void proc_add_group(struct prm_client *client,
++ struct prm_group *group, unsigned int group_id)
++{
++ char buf[16];
++
++ sprintf(buf, "group%d", group_id);
++ group->dir = proc_mkdir(buf, client->dir);
++ create_proc_read_entry("info", 0, group->dir, dump_group, group);
++}
++
++static void proc_del_group(struct prm_client*client,
++ struct prm_group *group, unsigned int group_id)
++{
++ char buf[32];
++
++ remove_proc_entry("info", group->dir);
++ sprintf(buf, "group%d", group_id);
++ remove_proc_entry(buf, client->dir);
++}
++
++static void proc_add_resource(struct prm_resource *resource)
++{
++ char buf[16];
++
++ sprintf(buf, "resource%d", resource->id);
++ resource->dir = proc_mkdir(buf, resources_root);
++ create_proc_read_entry("info", 0, resource->dir,
++ dump_resource, resource);
++}
++
++static void proc_del_resource(struct prm_resource *resource)
++{
++ char buf[16];
++
++ remove_proc_entry("info", resource->dir);
++ sprintf(buf, "resource%d", resource->id);
++ remove_proc_entry(buf, resources_root);
++}
++
++static void proc_add_resource_state(struct prm_resource_state *state,
++ unsigned int priority)
++{
++ char buf[16];
++
++ sprintf(buf, "state%d", priority);
++ state->dir = proc_mkdir(buf, state->resource->dir);
++ create_proc_read_entry("info", 0, state->dir,
++ dump_resource_state, state);
++}
++
++static void proc_del_resource_state(struct prm_resource_state *state,
++ unsigned int priority)
++{
++ char buf[16];
++
++ remove_proc_entry("info", state->dir);
++ sprintf(buf, "state%d", priority);
++ remove_proc_entry(buf, state->resource->dir);
++}
++
++static void proc_commit_resource(struct prm_resource *resource)
++{
++ char buf[32];
++
++ remove_proc_entry("access", resource->dir);
++ sprintf(buf, "/proc/prm/clients/%s", resource->access->dir->name);
++ proc_symlink("access", resource->dir, buf);
++}
++
++static void proc_allocate_resource(struct prm_resource_state *state)
++{
++ char buf[32], path[64];
++
++ remove_proc_entry("allocate", state->dir);
++
++ sprintf(path, "/proc/prm/clients/%s/%s",
++ state->allocate->dir->name, state->group->dir->name);
++ proc_symlink("group", state->dir, path);
++ sprintf(buf, "resource%d_state%d",
++ state->resource->id, state->allocate->priority);
++ sprintf(path, "/proc/prm/resources/%s/%s",
++ state->resource->dir->name, state->dir->name);
++ proc_symlink(buf, state->group->dir, path);
++}
++
++static void proc_free_resource(struct prm_resource_state *state)
++{
++ char buf[32];
++
++ sprintf(buf, "resource%d_state%d",
++ state->resource->id, state->allocate->priority);
++ remove_proc_entry("access", state->resource->dir);
++ remove_proc_entry("allocate", state->dir);
++ remove_proc_entry("group", state->dir);
++ remove_proc_entry(buf, state->group->dir);
++}
++
++static void proc_prm_init(void)
++{
++ prm_root = proc_mkdir("prm", NULL);
++ clients_root = proc_mkdir("clients", prm_root);
++ resources_root = proc_mkdir("resources", prm_root);
++}
++
++static void proc_prm_exit(void)
++{
++ remove_proc_entry("clients", prm_root);
++ remove_proc_entry("resources", prm_root);
++ remove_proc_entry("prm", NULL);
++}
++#else
++static void proc_add_client(struct prm_client *client) {}
++static void proc_del_client(struct prm_client *client) {}
++static void proc_add_group(struct prm_client *client,
++ struct prm_group *group, unsigned int group_id) {}
++static void proc_del_group(struct prm_client*client,
++ struct prm_group *group, unsigned int group_id) {}
++static void proc_add_resource(struct prm_resource *resource) {}
++static void proc_del_resource(struct prm_resource *resource) {}
++static void proc_add_resource_state(struct prm_resource_state *state,
++ unsigned int priority) {}
++static void proc_del_resource_state(struct prm_resource_state *state,
++ unsigned int priority) {}
++static void proc_commit_resource(struct prm_resource *resource) {}
++static void proc_allocate_resource(struct prm_resource_state *state) {}
++static void proc_free_resource(struct prm_resource_state *state) {}
++static void proc_prm_init(void) {}
++static void proc_prm_exit(void) {}
++#endif
++
++/*****************************************************************************/
++/* */
++/* Profiler Resource Manager */
++/* */
++/*****************************************************************************/
++
++static void clear_state(struct prm_resource_state *state)
++{
++ state->allocate = NULL;
++ state->group = NULL;
++ state->active = STATE_UNDEF;
++ /* the state has been deleted from the group resource list */
++ INIT_LIST_HEAD(&(state->entry));
++}
++
++static int group_commited(struct prm_client *client,
++ struct prm_group *group)
++{
++ struct prm_resource_state *state;
++ struct prm_resource *resource;
++ struct list_head *pos;
++
++ list_for_each(pos, &(group->resources)) {
++ state = list_entry(pos, struct prm_resource_state, entry);
++ resource = state->resource;
++ if (resource->access != client) {
++ return 0;
++ }
++ }
++ return 1;
++}
++
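++/*
++ * Count how many resources in the group are currently held by clients of
++ * higher priority; with set_state, also mark the corresponding group entries
++ * as appropriated. A return value of 0 means the whole group can be committed.
++ */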
++static int try_to_access_group(struct prm_client *client,
++ struct prm_group *group, int set_state)
++{
++ struct prm_resource_state *state;
++ struct prm_resource *resource;
++ int ret = 0;
++ struct list_head *pos;
++
++ DPRINTK("client <%d> try to access group <%d>, set_state as <%d>\n",
++ (unsigned int)client->id, (unsigned int)group->id, set_state);
++ list_for_each(pos, &(group->resources)) {
++ state = list_entry(pos, struct prm_resource_state, entry);
++ resource = state->resource;
++ if (resource->access != NULL && resource->access != client &&
++ IS_HIGHER_PRIORITY(resource->access->priority,
++ client->priority)) {
++ if (set_state) {
++ state->active = STATE_APPROPRIATED;
++ group->appropriated_cnt++;
++ }
++ ret++;
++ }
++ }
++ DPRINTK("try_to_access() return :%d\n", ret);
++ return ret;
++}
++
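++/*
++ * With a NULL resource, report the client that owns every PMU counter
++ * resource: the owner pointers are ANDed together, so the result only
++ * matches a client when all counters share the same owner.
++ */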
++static struct prm_client * get_resource_access(struct prm_resource *resource)
++{
++ if (resource)
++ return resource->access;
++ else /* for access to the ISR and control regs of the PMU */
++ return (struct prm_client *)(
++ (unsigned long)prm_resources[PRM_CCNT].access &
++ (unsigned long)prm_resources[PRM_PMN0].access &
++ (unsigned long)prm_resources[PRM_PMN1].access &
++ (unsigned long)prm_resources[PRM_PMN2].access &
++ (unsigned long)prm_resources[PRM_PMN3].access
++ );
++}
++
++static void unload_isr(struct prm_client *client)
++{
++ if (prm_pmu_client == client || get_resource_access(NULL) == client)
++ prm_pmu_client = NULL;
++}
++
++static void load_isr(struct prm_client *client)
++{
++ if (prm_pmu_client != client && get_resource_access(NULL) == client)
++ prm_pmu_client = client;
++}
++
++/* This function is invoked with prm_sem held for writing. */
++static int set_resource_access(struct prm_resource *resource,
++ struct prm_client *client)
++{
++ struct prm_resource_state *state, *owner_state;
++ struct prm_group *group, *owner_group;
++ struct prm_client *owner;
++ int ret = 0;
++
++ if (client == NULL) {
++ /* The client frees its committed resources back to the appropriated
++ * lower-priority client. A notification is also sent to give a
++ * lower-priority client the chance to commit its resources if:
++ * 1. all resources of the lower-priority group that this resource
++ * belongs to are committable, and
++ * 2. that lower-priority client has not yet committed the group.
++ * Note: the notified client is not necessarily the appropriated client.
++ */
++ int index;
++
++ DPRINTK("client <%d> give up resource <%d>\n",
++ (unsigned int)resource->access->id, (unsigned int)resource->id);
++ unload_isr(resource->access);
++ owner = resource->access;
++ resource->access = NULL;
++ resource->priority[owner->priority].active = STATE_UNDEF;
++ for_each_lower_priority(index, owner->priority) {
++ state = &(resource->priority[index]);
++ DPRINTK(" its state of lower priority <%d> is <%d>\n",
++ index, state->active);
++ if (state->active == STATE_APPROPRIATED) {
++ DPRINTK("client <%d> return resource <%d>"
++ " to client <%d>\n", owner->id,
++ resource->id, state->allocate->id);
++ group = state->group;
++ group->appropriated_cnt--;
++ DPRINTK("resource group <%d> of client <%d>"
++ " has <%d> resources appropriated\n",
++ group->id, state->allocate->id,
++ group->appropriated_cnt);
++ }
++ if (state->group &&
++ state->group->appropriated_cnt == 0 &&
++ state->allocate &&
++ !group_commited(state->allocate, state->group)) {
++ ret = try_to_access_group(state->allocate,
++ state->group, 1);
++ if (ret < 0)
++ return ret;
++ else if (ret == 0) {
++ /* state->active = STATE_UNDEF;
++ */
++ /* The ISR is not reloaded here, because the
++ * group has not been committed again yet.
++ * It will be loaded at commit time.
++ */
++ if (state->allocate->notify) {
++ up_write(&prm_sem);
++ DPRINTK("client <%d> notified"
++ " with PRM_RES_READY\n",
++ (unsigned int)state->allocate->id);
++ state->allocate->notify(PRM_RES_READY,
++ state->group->id,
++ state->allocate->client_data);
++ down_write(&prm_sem);
++ }
++ break;
++ }
++ }
++ }
++ }
++ else {
++ struct prm_resource *group_resource;
++ struct list_head *pos;
++
++ owner = resource->access;
++
++ if (owner == client){
++ DPRINTK("client <%d> commits resource <%d>:"
++ " already commited\n",
++ (unsigned int)client->id,
++ (unsigned int)resource->id);
++ return 0;
++ }
++ if (!owner)
++ unload_isr(owner);
++ resource->access = client;
++ resource->priority[client->priority].active = STATE_ACTIVE;
++ load_isr(client);
++ if (owner == NULL) {
++ DPRINTK("client <%d> commits resource <%d>:"
++ " from free list\n",
++ (unsigned int)client->id,
++ (unsigned int)resource->id);
++ return 0;
++ } else {
++ DPRINTK("client <%d> commits resource <%d>:"
++ " from client <%d>\n\n",
++ (unsigned int)client->id,
++ (unsigned int)resource->id, owner->id);
++ }
++
++ owner_state = &(resource->priority[owner->priority]);
++ owner_state->active = STATE_APPROPRIATED;
++ owner_group = owner_state->group;
++ if (owner_group->appropriated_cnt++ == 0) {
++ list_for_each(pos, &(owner_group->resources)) {
++ state = list_entry(pos,
++ struct prm_resource_state, entry);
++ group_resource = state->resource;
++ if (group_resource->access == owner)
++ ret = set_resource_access(
++ group_resource, NULL);
++ if (ret)
++ return ret;
++ }
++ if (owner->notify) {
++ up_write(&prm_sem);
++ DPRINTK("client <%d> notified with"
++ " PRM_RES_APPROPRIATED\n",
++ (unsigned int)owner->id);
++ owner->notify(PRM_RES_APPROPRIATED,
++ owner_group->id, owner->client_data);
++ down_write(&prm_sem);
++ }
++ }
++ }
++ return 0;
++}
++
++int prm_open_session(prm_priority priority, char *name,
++ clientcallback callback, void *data)
++{
++ struct prm_client * client;
++ unsigned int name_len;
++ int i = 0;
++
++ ASSERT_PRIORITY(priority);
++ if (!name) {
++ return -EINVAL;
++ }
++ /* protect for read */
++ down_read(&prm_sem);
++ for (i = 0;i < MAX_CLIENTS;i++) {
++ if (prm_clients[i] == NULL)
++ break;
++ }
++ up_read(&prm_sem);
++
++ if (i == MAX_CLIENTS)
++ return -ENOENT;
++
++ name_len = strlen(name);
++ client = (struct prm_client *)
++ kmalloc(sizeof(struct prm_client) + name_len + 1, GFP_KERNEL);
++ if (!client)
++ return -ENOMEM;
++ memset(client, 0x0, sizeof(struct prm_client));
++ client->id = i;
++ client->pid = current->pid;
++ client->priority = priority;
++ client->notify = callback;
++ client->client_data = data;
++ client->name = (char *)(client + 1);
++ strncpy(client->name, name, name_len);
++ client->name[name_len] = '\0';
++
++ for(i = 0;i < MAX_GROUPS;i++)
++ client->groups[i] = NULL;
++
++ down_write(&prm_sem);
++ if (prm_clients[client->id] != NULL) {
++ up_write(&prm_sem);
++ kfree(client);
++ return -ENOENT;
++ }
++ prm_clients[client->id] = client;
++ up_write(&prm_sem);
++ proc_add_client(client);
++
++ DPRINTK("client<%d>(%s) open a session with priority <%d>\n",
++ client->id, name, priority);
++
++ return client->id;
++}
++
++int prm_close_session(unsigned int client_id)
++{
++ struct prm_client *client;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_write(&prm_sem);
++ client = prm_clients[client_id];
++ /* resources must be freed before closing the session */
++ if (client->group_cnt) {
++ up_write(&prm_sem);
++ return -EPERM;
++ }
++ prm_clients[client_id] = NULL;
++ up_write(&prm_sem);
++ proc_del_client(client);
++ kfree(client);
++
++ DPRINTK("client<%d> closed its session\n", client_id);
++
++ return 0;
++}
++
++/* allocate a resource; it cannot be accessed until it is committed */
++int prm_allocate_resource(unsigned int client_id,
++ prm_resource_id res_id, unsigned int group_id)
++{
++ struct prm_client *client;
++ struct prm_resource *resource;
++ struct prm_resource_state *state;
++ struct prm_group *group;
++
++ ASSERT_CLIENT_ID(client_id);
++ ASSERT_RESOURCE_ID(res_id);
++ ASSERT_GROUP_ID(group_id);
++
++ DPRINTK("allocate resource for client <%d> with resource <%d>"
++ " for group <%d>\n", client_id, res_id, group_id);
++ down_write(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_write(&prm_sem);
++ return -EINVAL;
++ }
++ resource = &(prm_resources[res_id]);
++ state = &(resource->priority[client->priority]);
++
++ /* The resource at this client's priority has already been reserved */
++ if (state->allocate) {
++ up_write(&prm_sem);
++ return -EPERM;
++ }
++ else
++ state->allocate = client;
++ group = client->groups[group_id];
++ up_write(&prm_sem);
++
++ if (group == NULL) {
++ group = (struct prm_group *)
++ kmalloc(sizeof(struct prm_group), GFP_KERNEL);
++ if (group == NULL)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&(group->resources));
++ group->id = group_id;
++ group->appropriated_cnt = 0;
++ group->member_cnt = 1;
++ proc_add_group(client, group, group_id);
++
++ down_write(&prm_sem);
++ if (client->groups[group_id]) {
++ up_write(&prm_sem);
++ kfree(group);
++ down_write(&prm_sem);
++ group = client->groups[group_id];
++ }
++ else {
++ client->groups[group_id] = group;
++ client->group_cnt++;
++ }
++ }
++ else {
++ down_write(&prm_sem);
++ client->groups[group_id]->member_cnt++;
++ }
++ list_add(&(state->entry), &(group->resources));
++ state->group = group;
++ state->active = STATE_UNDEF;
++ up_write(&prm_sem);
++ proc_allocate_resource(state);
++
++ return 0;
++}
++
++int prm_free_resources(unsigned int client_id, unsigned int group_id)
++{
++ struct prm_client *client;
++ struct prm_resource *resource;
++ struct prm_group *group;
++ struct prm_resource_state *state;
++ int ret = -EINVAL;
++ struct list_head *pos, *n;
++
++ ASSERT_CLIENT_ID(client_id);
++ ASSERT_GROUP_ID(group_id);
++
++ down_write(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_write(&prm_sem);
++ return -EINVAL;
++ }
++ group = client->groups[group_id];
++ if (!group) {
++ up_write(&prm_sem);
++ return -EINVAL;
++ }
++
++ list_for_each_safe(pos, n, &(group->resources)) {
++ state = list_entry(pos, struct prm_resource_state, entry);
++ resource = state->resource;
++ if (get_resource_access(resource) == client) {
++ ret = set_resource_access(resource, NULL);
++ if (ret) {
++ up_write(&prm_sem);
++ return ret;
++ }
++ }
++#if 0
++ else if (state->active == STATE_APPROPRIATED)
++ group->appropriated_cnt--;
++#endif
++ proc_free_resource(state);
++ list_del(pos);
++ clear_state(state);
++ }
++ client->group_cnt--;
++ client->groups[group_id] = NULL;
++ up_write(&prm_sem);
++ proc_del_group(client, group, group_id);
++ kfree(group);
++
++ return 0;
++}
++
++int prm_commit_resources(unsigned int client_id, unsigned int group_id)
++{
++ struct prm_client *client;
++ struct prm_group *group;
++ struct prm_resource_state *state;
++ struct prm_resource *resource;
++ struct list_head *pos;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++ ASSERT_GROUP_ID(group_id);
++
++ DPRINTK("client <%d> commit resource group <%d>\n",
++ client_id, group_id);
++ down_write(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_write(&prm_sem);
++ return -EINVAL;
++ }
++ group = client->groups[group_id];
++ if (!group) {
++ up_write(&prm_sem);
++ return -EINVAL;
++ }
++
++ ret = try_to_access_group(client, group, 0);
++ if (ret) {
++ up_write(&prm_sem);
++ return ret;
++ }
++
++ list_for_each(pos, &(group->resources)) {
++ state = list_entry(pos, struct prm_resource_state, entry);
++ resource = state->resource;
++ ret = set_resource_access(resource, client);
++ if (ret) {
++ up_write(&prm_sem);
++ return ret;
++ }
++ proc_commit_resource(resource);
++ }
++ up_write(&prm_sem);
++ return 0;
++}
++
++int prm_get_cpuid(void)
++{
++ int cpu_id;
++
++ asm("mrc p15, 0, %0, c0, c0" : "=r" (cpu_id));
++ cpu_id &= 0xfffff000;
++
++ return cpu_id;
++}
++
++static irqreturn_t prm_pmu_handler(int irq, void *dev_id)
++{
++ /*DPRINTK("PMU interrupt generated!\n");
++ */
++ if (prm_pmu_client)
++ prm_pmu_client->handler(irq, prm_pmu_client->dev_id);
++ return IRQ_HANDLED;
++}
++
++EXPORT_SYMBOL(prm_open_session);
++EXPORT_SYMBOL(prm_close_session);
++EXPORT_SYMBOL(prm_allocate_resource);
++EXPORT_SYMBOL(prm_free_resources);
++EXPORT_SYMBOL(prm_commit_resources);
++EXPORT_SYMBOL(prm_get_cpuid);
++
++/*****************************************************************************/
++/* */
++/* PMU API */
++/* */
++/*****************************************************************************/
++
++int pmu_read_register(unsigned int client_id, int reg, unsigned int *pval)
++{
++ struct prm_resource *resource;
++ struct prm_client *client;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ resource = IS_PRM_RESOURCE(reg)? &(prm_resources[PMU_PRM(reg)]):NULL;
++ ret = (get_resource_access(resource) == client);
++ up_read(&prm_sem);
++
++ if (ret)
++ *pval = pmu_read_reg(reg);
++ else
++ return -EACCES;
++
++ return 0;
++}
++
++int pmu_write_register(unsigned int client_id, int reg, unsigned int val)
++{
++ struct prm_resource *resource;
++ struct prm_client *client;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ resource = IS_PRM_RESOURCE(reg)? &(prm_resources[PMU_PRM(reg)]):NULL;
++ ret = (get_resource_access(resource) == client);
++ up_read(&prm_sem);
++
++ if (ret)
++ pmu_write_reg(reg, val);
++ else
++ return -EACCES;
++
++ return 0;
++}
++
++int pmu_set_event(unsigned int client_id, unsigned int counter,
++ int *pre_type, int type)
++{
++ struct prm_client *client;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ ret = (client == get_resource_access(NULL)) ;
++ up_read(&prm_sem);
++
++ if (ret) {
++ *pre_type = pmu_select_event(counter, type);
++ if (*pre_type == PMU_EVENT_INVALIDATE)
++ return -EINVAL;
++ }
++ else
++ return -EACCES;
++ return 0;
++}
++
++int pmu_enable_event_counting(unsigned int client_id)
++{
++ struct prm_client *client;
++ unsigned long val;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ ret = (client == get_resource_access(NULL));
++ up_read(&prm_sem);
++
++ if (ret) {
++ /* enable and reset all counters,
++ * CCNT counts every clock cycle
++ */
++ val = 0x07;
++ pmu_write_reg(PMU_PMNC, val);
++ }
++ else
++ return -EACCES;
++ return 0;
++}
++
++int pmu_disable_event_counting(unsigned int client_id)
++{
++ struct prm_client *client;
++ unsigned long val;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ ret = (client == get_resource_access(NULL));
++ up_read(&prm_sem);
++
++ if (ret) {
++ /* disable all counters */
++ val = 0x10;
++ pmu_write_reg(PMU_PMNC, val);
++ }
++ else
++ return -EACCES;
++ return 0;
++}
++
++int pmu_enable_event_interrupt(unsigned int client_id, int reg)
++{
++ struct prm_client *client;
++ unsigned long val;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ if (IS_PRM_RESOURCE(reg)) {
++ ret = (get_resource_access(&(prm_resources[PMU_PRM(reg)])) == client);
++ up_read(&prm_sem);
++ if (ret) {
++ val = pmu_read_reg(PMU_INTEN);
++ val |= (0x1 << reg);
++ pmu_write_reg(PMU_INTEN, val);
++ }
++ else
++ return -EACCES;
++ }
++ else {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int pmu_disable_event_interrupt(unsigned int client_id, int reg)
++{
++ struct prm_client *client;
++ unsigned long val;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ if (IS_PRM_RESOURCE(reg)) {
++ ret = (get_resource_access(&(prm_resources[PMU_PRM(reg)])) == client);
++ up_read(&prm_sem);
++ if (ret) {
++ val = pmu_read_reg(PMU_INTEN);
++ val &= ~(0x1 << reg);
++ pmu_write_reg(PMU_INTEN, val);
++ }
++ else
++ return -EACCES;
++ }
++ else {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int pmu_register_isr(unsigned int client_id,
++ irq_handler_t handler, void *dev_id)
++{
++ struct prm_client *client;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ client->handler = handler;
++ client->dev_id = dev_id;
++ load_isr(client);
++ up_read(&prm_sem);
++ return 0;
++}
++
++int pmu_unregister_isr(unsigned int client_id)
++{
++ struct prm_client *client;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ unload_isr(client);
++ client->handler = NULL;
++ client->dev_id = NULL;
++ up_read(&prm_sem);
++ return 0;
++}
++
++EXPORT_SYMBOL(pmu_read_reg);
++EXPORT_SYMBOL(pmu_write_reg);
++EXPORT_SYMBOL(pmu_read_register);
++EXPORT_SYMBOL(pmu_write_register);
++EXPORT_SYMBOL(pmu_set_event);
++EXPORT_SYMBOL(pmu_enable_event_counting);
++EXPORT_SYMBOL(pmu_disable_event_counting);
++EXPORT_SYMBOL(pmu_enable_event_interrupt);
++EXPORT_SYMBOL(pmu_disable_event_interrupt);
++EXPORT_SYMBOL(pmu_register_isr);
++EXPORT_SYMBOL(pmu_unregister_isr);
++
++/*****************************************************************************/
++/* */
++/* COP API */
++/* */
++/*****************************************************************************/
++
++int cop_get_num_of_cops(void)
++{
++ return dvfm_op_count();
++}
++
++int cop_get_cop(unsigned int client_id, unsigned int n,
++ struct pxa3xx_fv_info *param)
++{
++ struct op_info *info = NULL;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ ret = dvfm_get_opinfo(n, &info);
++ if (ret == 0) {
++ md2fvinfo(param, (struct dvfm_md_opt *)info->op);
++ }
++ return ret;
++}
++
++int cop_set_cop(unsigned int client_id, unsigned int n, int mode)
++{
++ struct prm_resource *resource;
++ struct prm_client *client;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ resource = &prm_resources[PRM_COP];
++ ret = (get_resource_access(resource) == client);
++ up_read(&prm_sem);
++
++ if (ret)
++ return dvfm_request_op(n);
++ return -EACCES;
++}
++
++int cop_get_def_cop(unsigned int client_id, unsigned int *n,
++ struct pxa3xx_fv_info *param)
++{
++ struct op_info *info = NULL;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ *n = dvfm_get_defop();
++ ret = dvfm_get_opinfo(*n, &info);
++ if (ret == 0) {
++ md2fvinfo(param, (struct dvfm_md_opt *)info->op);
++ }
++ return ret;
++}
++
++int cop_set_def_cop(unsigned int client_id)
++{
++ struct prm_resource *resource;
++ struct prm_client *client;
++ unsigned int def_op;
++ int ret;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ down_read(&prm_sem);
++ client = prm_clients[client_id];
++ if (!client) {
++ up_read(&prm_sem);
++ return -EINVAL;
++ }
++
++ resource = &prm_resources[PRM_COP];
++ ret = (get_resource_access(resource) == client);
++ up_read(&prm_sem);
++
++ def_op = dvfm_get_defop();
++ if (ret)
++ return dvfm_request_op(def_op);
++ return -EACCES;
++}
++
++int cop_get_cur_cop(unsigned int client_id, unsigned int *n,
++ struct pxa3xx_fv_info *param)
++{
++ struct op_info *info = NULL;
++
++ ASSERT_CLIENT_ID(client_id);
++
++ *n = dvfm_get_op(&info);
++ md2fvinfo(param, (struct dvfm_md_opt *)info->op);
++
++ return 0;
++}
++
++EXPORT_SYMBOL(cop_get_num_of_cops);
++EXPORT_SYMBOL(cop_get_cop);
++EXPORT_SYMBOL(cop_set_cop);
++EXPORT_SYMBOL(cop_get_def_cop);
++EXPORT_SYMBOL(cop_set_def_cop);
++EXPORT_SYMBOL(cop_get_cur_cop);
++
++/*****************************************************************************/
++/* */
++/* Module Init/Exit */
++/* */
++/*****************************************************************************/
++
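++/* Set up the resource/state tables, the /proc tree and the shared PMU IRQ handler. */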
++static int __init prm_init(void)
++{
++ int ret, i , j;
++
++ proc_prm_init();
++ init_rwsem(&prm_sem);
++ /*prm_sem.debug = 1;
++ */
++ for (i = 0; i < RESOURCE_NUM; i++) {
++ prm_resources[i].access = NULL;
++ prm_resources[i].id = i;
++ proc_add_resource(&prm_resources[i]);
++ for (j = 0; j < MAX_PRIORITIES;j++) {
++ prm_resources[i].priority[j].resource = &prm_resources[i];
++ prm_resources[i].priority[j].allocate = NULL;
++ prm_resources[i].priority[j].active = STATE_UNDEF;
++ INIT_LIST_HEAD(&(prm_resources[i].priority[j].entry));
++ proc_add_resource_state(&prm_resources[i].priority[j], j);
++ }
++ }
++
++ for (i = 0; i < MAX_CLIENTS; i++) {
++ prm_clients[i] = NULL;
++ }
++ prm_pmu_client = NULL;
++
++ ret = request_irq(IRQ_PMU, prm_pmu_handler, 0, "PMU", NULL);
++ if (ret < 0) {
++ DPRINTK("PMU interrupt handler registeration: failed!\n");
++ return ret;
++ } else {
++ DPRINTK("PMU interrupt handler registeration: OK!\n");
++ }
++
++ DPRINTK("CPU_ID = 0x%08x\n", prm_get_cpuid());
++
++ return 0;
++}
++
++static void __exit prm_exit(void)
++{
++ int i, j;
++
++ for (i = 0; i < RESOURCE_NUM; i++) {
++ for(j = 0; j < MAX_PRIORITIES;j++) {
++ proc_del_resource_state(&prm_resources[i].priority[j], j);
++ }
++ proc_del_resource(&prm_resources[i]);
++ memset(&(prm_resources[i]), 0x0, sizeof(struct prm_resource));
++ }
++
++ for (i = 0; i < MAX_CLIENTS; i++) {
++ if (prm_clients[i]) {
++ if (prm_clients[i]->group_cnt) {
++ for (j = 0; j < MAX_GROUPS; j++) {
++ if (prm_clients[i]->groups[j]) {
++ proc_del_group(prm_clients[i],
++ prm_clients[i]->groups[j], j);
++ kfree(prm_clients[i]->groups[j]);
++ }
++ }
++ }
++ proc_del_client(prm_clients[i]);
++ kfree(prm_clients[i]);
++ }
++ }
++ prm_pmu_client = NULL;
++ free_irq(IRQ_PMU, NULL);
++ proc_prm_exit();
++}
++
++module_init(prm_init);
++module_exit(prm_exit);
++
++MODULE_DESCRIPTION("Performance Resources Management");
++MODULE_LICENSE("GPL");
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pxa3xx.c kernel/arch/arm/mach-pxa/pxa3xx.c
+--- linux-2.6.32/arch/arm/mach-pxa/pxa3xx.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/pxa3xx.c 2009-12-12 16:09:26.482948915 +0200
+@@ -613,3 +613,4 @@
+ }
+
+ postcore_initcall(pxa3xx_init);
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pxa3xx_dvfm.c kernel/arch/arm/mach-pxa/pxa3xx_dvfm.c
+--- linux-2.6.32/arch/arm/mach-pxa/pxa3xx_dvfm.c 2009-12-13 13:00:35.598610849 +0200
++++ kernel/arch/arm/mach-pxa/pxa3xx_dvfm.c 2009-12-12 16:09:26.482948915 +0200
+@@ -0,0 +1,2319 @@
++/*
++ * PXA3xx DVFM Driver
++ *
++ * Copyright (C) 2007 Marvell Corporation
++ * Haojian Zhuang <haojian.zhuang@marvell.com>
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++
++ * (C) Copyright 2007 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#define DEBUG
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sysdev.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/list.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <asm/uaccess.h>
++//#include <asm/arch/pxa-regs.h>
++#include <mach/pxa3xx-regs.h>
++#include <mach/pxa3xx_pmic.h>
++#include <mach/hardware.h>
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_dvfm.h>
++#include <asm/io.h>
++//#include <asm/arch/pxa_ispt.h>
++//#include <asm/arch/mspm_prof.h>
++
++#include "devices.h"
++
++#ifdef CONFIG_CPU_PXA310
++#define FREQ_CORE(xl, xn) ((xl)*(xn)*13)
++
++#define FREQ_SRAM(sflfs) (((sflfs) == 0x0)?104: \
++ ((sflfs) == 0x1)?156: \
++ ((sflfs) == 0x2)?208:312)
++
++#define FREQ_STMM(smcfs) (((smcfs) == 0x0)?78: \
++ ((smcfs) == 0x2)?104: \
++ ((smcfs) == 0x5)?208:0)
++
++#define FREQ_DDR(dmcfs) (((dmcfs) == 0x0)?26: \
++ ((dmcfs) == 0x2)?208: \
++ ((dmcfs) == 0x3)?260:0)
++
++#define FREQ_HSS(hss) (((hss) == 0x0)?104: \
++ ((hss) == 0x1)?156: \
++ ((hss) == 0x2)?208:0)
++
++#define FREQ_DFCLK(smcfs, df_clkdiv) \
++ (((df_clkdiv) == 0x1)?FREQ_STMM((smcfs)): \
++ ((df_clkdiv) == 0x2)?FREQ_STMM((smcfs))/2: \
++ ((df_clkdiv) == 0x3)?FREQ_STMM((smcfs))/4:0)
++
++#define FREQ_EMPICLK(smcfs, empi_clkdiv) \
++ (((empi_clkdiv) == 0x1)?FREQ_STMM((smcfs)): \
++ ((empi_clkdiv) == 0x2)?FREQ_STMM((smcfs))/2: \
++ ((empi_clkdiv) == 0x3)?FREQ_STMM((smcfs))/4:0)
++
++#define LPJ_PER_MHZ 4988
++#endif
++
++/* Enter D2 before exiting D0CS */
++#define DVFM_LP_SAFE
++
++struct pxa3xx_dvfm_info {
++ /* flags */
++ uint32_t flags;
++
++ /* CPU ID */
++ uint32_t cpuid;
++
++ /* LCD clock */
++ struct clk *lcd_clk;
++
++ /* clock manager register base */
++ unsigned char __iomem *clkmgr_base;
++
++ /* service power management unit */
++ unsigned char __iomem *spmu_base;
++
++ /* slave power management unit */
++ unsigned char __iomem *bpmu_base;
++
++ /* dynamic memory controller register base */
++ unsigned char __iomem *dmc_base;
++
++ /* static memory controller register base */
++ unsigned char __iomem *smc_base;
++};
++
++#define MIN_SAFE_FREQUENCY 624
++
++struct info_head pxa3xx_dvfm_op_list = {
++ .list = LIST_HEAD_INIT(pxa3xx_dvfm_op_list.list),
++ .lock = RW_LOCK_UNLOCKED,
++};
++
++#ifdef CONFIG_PXA3xx_DVFM_STATS
++
++static unsigned int switch_lowpower_before, switch_lowpower_after;
++
++static int pxa3xx_stats_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data);
++static struct notifier_block notifier_freq_block = {
++ .notifier_call = pxa3xx_stats_notifier_freq,
++};
++#endif
++
++/* the operating point preferred by policy maker or user */
++static int preferred_op;
++static int current_op;
++
++extern unsigned int cur_op; /* current operating point */
++extern unsigned int def_op; /* default operating point */
++
++extern int enter_d0cs_a(volatile u32 *, volatile u32 *);
++extern int exit_d0cs_a(volatile u32 *, volatile u32 *);
++extern int md2fvinfo(struct pxa3xx_fv_info *, struct dvfm_md_opt *);
++extern void set_idle_op(int, int);
++
++#ifdef CONFIG_FB_PXA
++extern void pxafb_set_pcd(void);
++#else
++static void pxafb_set_pcd(void) {}
++#endif
++
++static int dvfm_dev_id;
++#define LPJ_D0CS (293888 * 100 / HZ)
++#define LPJ_104M (517120 * 100 / HZ)
++#define LPJ_156M (778128 * 100 / HZ)
++#define LPJ_208M (1036288 * 100 / HZ)
++#define LPJ_416M (2076672 * 100 / HZ)
++#define LPJ_624M (3112960 * 100 / HZ)
++#define LPJ_806M (4020906 * 100 / HZ)
++
++static int d0cs_lpj = LPJ_D0CS;
++
++static int boot_core_freq = 0;
++
++int out_d0cs = 0;
++
++/* define the operating point of S0D0 and S0D0CS mode */
++static struct dvfm_md_opt pxa300_op_array[] = {
++ /* 60MHz -- ring oscillator */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 0,
++ .xn = 0,
++ .smcfs = 15,
++ .sflfs = 60,
++ .hss = 60,
++ .dmcfs = 30, /* will be 60MHZ for PXA310 A2 and PXA935/PXA940 */
++ .df_clk = 15,
++ .empi_clk = 15,
++ .power_mode = POWER_MODE_D0CS,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 293888*100/HZ,
++ .name = "D0CS",
++ },
++ /* 104MHz */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 8,
++ .xn = 1,
++ .smcfs = 78,
++ .sflfs = 104,
++ .hss = 104,
++ .dmcfs = 260,
++ /* Actually it's 19.5, not 19 */
++ .df_clk = 19,
++ .empi_clk = 19,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 517120*100/HZ,
++ .name = "104M",
++ },
++ /* 208MHz */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 16,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 1036288*100/HZ,
++ .name = "208M",
++ },
++ /* 416MHz */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .xl = 16,
++ .xn = 2,
++ .smcfs = 104,
++ .sflfs = 208,
++ .hss = 156,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 2076672*100/HZ,
++ .name = "416M",
++ },
++ /* 624MHz */
++ {
++ .vcc_core = 1375,
++ .vcc_sram = 1400,
++ .xl = 24,
++ .xn = 2,
++ .smcfs = 208,
++ .sflfs = 312,
++ .hss = 208,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 3112960*100/HZ,
++ .name = "624M",
++ },
++ /* D1 mode */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .power_mode = POWER_MODE_D1,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D1",
++ },
++ /* D2 mode */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .power_mode = POWER_MODE_D2,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D2",
++ },
++};
++
++static struct dvfm_md_opt pxa320_op_array[] = {
++ /* 60MHz -- ring oscillator */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 0,
++ .xn = 0,
++ .smcfs = 15,
++ .sflfs = 60,
++ .hss = 60,
++ .dmcfs = 30,
++ .df_clk = 15,
++ .empi_clk = 15,
++ .power_mode = POWER_MODE_D0CS,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 293888*100/HZ,
++ .name = "D0CS",
++ },
++ /* 104MHz */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 8,
++ .xn = 1,
++ .smcfs = 78,
++ .sflfs = 104,
++ .hss = 104,
++ .dmcfs = 260,
++ /* Actually it's 19.5, not 19 */
++ .df_clk = 19,
++ .empi_clk = 19,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 517120*100/HZ,
++ .name = "104M",
++ },
++ /* 208MHz */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 16,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 1036288*100/HZ,
++ .name = "208M",
++ },
++ /* 416MHz */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .xl = 16,
++ .xn = 2,
++ .smcfs = 104,
++ .sflfs = 208,
++ .hss = 156,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 2076672*100/HZ,
++ .name = "416M",
++ },
++ /* 624MHz */
++ {
++ .vcc_core = 1375,
++ .vcc_sram = 1400,
++ .xl = 24,
++ .xn = 2,
++ .smcfs = 208,
++ .sflfs = 312,
++ .hss = 208,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 3112960*100/HZ,
++ .name = "624M",
++ },
++ /* 806MHz */
++ {
++ .vcc_core = 1400,
++ .vcc_sram = 1400,
++ .xl = 31,
++ .xn = 2,
++ .smcfs = 208,
++ .sflfs = 312,
++ .hss = 208,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 4020906*100/HZ,
++ .name = "806M",
++ },
++#if 0
++ /* D1 mode */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .power_mode = POWER_MODE_D1,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D1",
++ },
++#endif
++ /* D2 mode */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .power_mode = POWER_MODE_D2,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D2",
++ },
++};
++
++static struct dvfm_md_opt pxa930_op_array[] = {
++ /* 60MHz -- ring oscillator */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 0,
++ .xn = 0,
++ .smcfs = 15,
++ .sflfs = 60,
++ .hss = 60,
++ .dmcfs = 30,
++ .df_clk = 15,
++ .empi_clk = 15,
++ .power_mode = POWER_MODE_D0CS,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 293888*100/HZ,
++ .name = "D0CS",
++ },
++ /* 156MHz -- single PLL mode */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 12,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 208,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 778128*100/HZ,
++ .name = "156M",
++ },
++ /* 208MHz */
++ {
++ .vcc_core = 1000,
++ .vcc_sram = 1100,
++ .xl = 16,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 1036288*100/HZ,
++ .name = "208M",
++ },
++ /* 416MHz */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .xl = 16,
++ .xn = 2,
++ .smcfs = 104,
++ .sflfs = 208,
++ .hss = 156,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 2076672*100/HZ,
++ .name = "416M",
++ },
++ /* 624MHz */
++ {
++ .vcc_core = 1375,
++ .vcc_sram = 1400,
++ .xl = 24,
++ .xn = 2,
++ .smcfs = 208,
++ .sflfs = 312,
++ .hss = 208,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 3112960*100/HZ,
++ .name = "624M",
++ },
++ /* D2 mode */
++ {
++ .vcc_core = 1100,
++ .vcc_sram = 1200,
++ .power_mode = POWER_MODE_D2,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D2",
++ },
++};
++
++static struct dvfm_md_opt pxa935_op_array[] = {
++ /* 60MHz -- ring oscillator */
++ {
++ .vcc_core = 1250,
++ .xl = 0,
++ .xn = 0,
++ .smcfs = 15,
++ .sflfs = 60,
++ .hss = 60,
++ .dmcfs = 30,
++ .df_clk = 15,
++ .empi_clk = 15,
++ .power_mode = POWER_MODE_D0CS,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 293888*100/HZ,
++ .name = "D0CS",
++ },
++ /* 156MHz -- single PLL mode */
++ {
++ .vcc_core = 1250,
++ .xl = 12,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 208,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 778128*100/HZ,
++ .name = "156M",
++ },
++ /* 208MHz */
++ {
++ .vcc_core = 1250,
++ .xl = 16,
++ .xn = 1,
++ .smcfs = 104,
++ .sflfs = 156,
++ .hss = 104,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 1036288*100/HZ,
++ .name = "208M",
++ },
++ /* 416MHz */
++ {
++ .vcc_core = 1250,
++ .xl = 16,
++ .xn = 2,
++ .smcfs = 104,
++ .sflfs = 208,
++ .hss = 156,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 2076672*100/HZ,
++ .name = "416M",
++ },
++ /* 624MHz */
++ {
++ .vcc_core = 1250,
++ .xl = 24,
++ .xn = 2,
++ .smcfs = 208,
++ .sflfs = 312,
++ .hss = 208,
++ .dmcfs = 260,
++ .df_clk = 52,
++ .empi_clk = 52,
++ .power_mode = POWER_MODE_D0,
++ .flag = OP_FLAG_FACTORY,
++ .lpj = 3112960*100/HZ,
++ .name = "624M",
++ },
++#if 0
++ /* D1 mode */
++ {
++ .vcc_core = 1250,
++ .power_mode = POWER_MODE_D1,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D1",
++ },
++#endif
++ /* D2 mode */
++ {
++ .vcc_core = 1250,
++ .power_mode = POWER_MODE_D2,
++ .flag = OP_FLAG_FACTORY,
++ .name = "D2",
++ },
++ /* CG (clock gated) mode */
++ {
++ .vcc_core = 1250,
++ .power_mode = POWER_MODE_CG,
++ .flag = OP_FLAG_FACTORY,
++ .name = "CG",
++ },
++
++};
++
++struct proc_op_array {
++ unsigned int cpuid;
++ char *cpu_name;
++ struct dvfm_md_opt *op_array;
++ unsigned int nr_op;
++};
++
++#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
++static struct proc_op_array proc_op_arrays[] = {
++ {0x6880, "PXA300", ARRAY_AND_SIZE(pxa300_op_array)},
++ {0x6890, "PXA310", ARRAY_AND_SIZE(pxa300_op_array)},
++ {0x6820, "PXA320", ARRAY_AND_SIZE(pxa320_op_array)},
++ {0x6830, "PXA930", ARRAY_AND_SIZE(pxa930_op_array)},
++ {0x6930, "PXA935/PXA940", ARRAY_AND_SIZE(pxa935_op_array)},
++};
++
++extern void pxa_clkcfg_write(unsigned int);
++
++static int prepare_dmc(void *driver_data, int flag);
++static int polling_dmc(void *driver_data);
++
++#ifdef CONFIG_ISPT
++static int ispt_dvfm_op(int old, int new)
++{
++ return ispt_dvfm_msg(old, new);
++}
++
++static int ispt_block_dvfm(int enable, int dev_id)
++{
++ int ret;
++ if (enable)
++ ret = ispt_driver_msg(CT_P_DVFM_BLOCK_REQ, dev_id);
++ else
++ ret = ispt_driver_msg(CT_P_DVFM_BLOCK_REL, dev_id);
++ return ret;
++}
++
++static int ispt_power_state_d2(void)
++{
++ return ispt_power_msg(CT_P_PWR_STATE_ENTRY_D2);
++}
++#else
++static int ispt_dvfm_op(int old, int new) { return 0; }
++static int ispt_block_dvfm(int enable, int dev_id) { return 0; }
++static int ispt_power_state_d2(void) { return 0; }
++#endif
++
++unsigned int pxa3xx_clk_to_lpj(unsigned int clk)
++{
++ if (clk == 624000000)
++ return LPJ_624M;
++ if (clk == 416000000)
++ return LPJ_416M;
++ if (clk == 208000000)
++ return LPJ_208M;
++ if (clk == 156000000)
++ return LPJ_156M;
++ if (clk == 104000000)
++ return LPJ_104M;
++ if (clk == 60000000)
++ return LPJ_D0CS;
++
++ printk(KERN_CRIT "%s does not support clk (%d MHz)\n",
++ __FILE__, clk/1000000);
++
++ return 0;
++}
++
++/* #####################Debug Function######################## */
++static int dump_op(void *driver_data, struct op_info *p, char *buf)
++{
++ int len, count, x;
++ struct dvfm_md_opt *q = (struct dvfm_md_opt *)p->op;
++
++ if (q == NULL)
++ len = sprintf(buf, "Can't dump the op info\n");
++ else {
++ /* count how many bits are set in the device word */
++ x = p->device;
++ for (count = 0; x; x = x & (x - 1), count++);
++ len = sprintf(buf, "OP:%d name:%s [%s, %d]\n",
++ p->index, q->name, (count)?"Disabled"
++ :"Enabled", count);
++ len += sprintf(buf + len, "vcore:%d vsram:%d xl:%d xn:%d "
++ "smcfs:%d sflfs:%d hss:%d dmcfs:%d df_clk:%d "
++ "power_mode:%d flag:%d\n",
++ q->vcc_core, q->vcc_sram, q->xl, q->xn,
++ q->smcfs, q->sflfs, q->hss, q->dmcfs,
++ q->df_clk, q->power_mode, q->flag);
++ }
++ return len;
++}
++
++static int dump_op_list(void *driver_data, struct info_head *op_table, int flag)
++{
++ struct op_info *p = NULL;
++ struct dvfm_md_opt *q = NULL;
++ struct list_head *list = NULL;
++ struct pxa3xx_dvfm_info *info = driver_data;
++ char buf[256];
++
++ if (!op_table || list_empty(&op_table->list)) {
++ printk(KERN_WARNING "op list is null\n");
++ return -EINVAL;
++ }
++ memset(buf, 0, 256);
++ list_for_each(list, &op_table->list) {
++ p = list_entry(list, struct op_info, list);
++ q = (struct dvfm_md_opt *)p->op;
++ if (q->flag <= flag) {
++ dump_op(info, p, buf);
++ pr_debug("%s", buf);
++ }
++ }
++ return 0;
++}
++
++/* ########################################################## */
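++/*
++ * Translate an operating point given in MHz/mV (struct dvfm_md_opt) into
++ * the ACCR/MEMCLKCFG register field encodings (struct pxa3xx_fv_info).
++ */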
++static int freq2reg(struct pxa3xx_fv_info *fv_info, struct dvfm_md_opt *orig)
++{
++ int res = -EFAULT, tmp;
++
++ if (orig && fv_info) {
++ fv_info->vcc_core = orig->vcc_core;
++ fv_info->vcc_sram = orig->vcc_sram;
++ if (orig->power_mode == POWER_MODE_D0) {
++ res = 0;
++ fv_info->xl = orig->xl;
++ fv_info->xn = orig->xn;
++ fv_info->d0cs = 0;
++ if (orig->smcfs == 78)
++ fv_info->smcfs = 0;
++ else if (orig->smcfs == 104)
++ fv_info->smcfs = 2;
++ else if (orig->smcfs == 208)
++ fv_info->smcfs = 5;
++ else
++ res = -EINVAL;
++ if (orig->sflfs == 104)
++ fv_info->sflfs = 0;
++ else if (orig->sflfs == 156)
++ fv_info->sflfs = 1;
++ else if (orig->sflfs == 208)
++ fv_info->sflfs = 2;
++ else if (orig->sflfs == 312)
++ fv_info->sflfs = 3;
++ else
++ res = -EINVAL;
++ if (orig->hss == 104)
++ fv_info->hss = 0;
++ else if (orig->hss == 156)
++ fv_info->hss = 1;
++ else if (orig->hss == 208)
++ fv_info->hss = 2;
++ else
++ res = -EINVAL;
++ if (orig->dmcfs == 26)
++ fv_info->dmcfs = 0;
++ else if (orig->dmcfs == 208)
++ fv_info->dmcfs = 2;
++ else if (orig->dmcfs == 260)
++ fv_info->dmcfs = 3;
++ else
++ res = -EINVAL;
++ tmp = orig->smcfs / orig->df_clk;
++ if (tmp == 2)
++ fv_info->df_clk = 2;
++ else if (tmp == 4)
++ fv_info->df_clk = 3;
++ fv_info->empi_clk = fv_info->df_clk;
++ } else if (orig->power_mode == POWER_MODE_D0CS) {
++ fv_info->d0cs = 1;
++ res = 0;
++ }
++ }
++ return res;
++}
++
++int md2fvinfo(struct pxa3xx_fv_info *fv_info, struct dvfm_md_opt *orig)
++{
++ return freq2reg(fv_info, orig);
++}
++
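++/*
++ * Inverse of freq2reg(): expand register field encodings captured from the
++ * hardware back into MHz values inside the same dvfm_md_opt.
++ */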
++static int reg2freq(void *driver_data, struct dvfm_md_opt *fv_info)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ int res = -EFAULT, tmp;
++ uint32_t accr;
++
++ if (fv_info) {
++ res = 0;
++ if (fv_info->power_mode == POWER_MODE_D0CS) {
++ /* set the S0D0CS operating point */
++ fv_info->power_mode = POWER_MODE_D0CS;
++ fv_info->xl = 0;
++ fv_info->xn = 0;
++ fv_info->smcfs = 15;
++ fv_info->sflfs = 60;
++ fv_info->hss = 60;
++ /* PXA310 A2 or PXA935/PXA940 */
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ if (accr & 0x80)
++ fv_info->dmcfs = 60;
++ else
++ fv_info->dmcfs = 30;
++ fv_info->df_clk = 15;
++ fv_info->empi_clk = 15;
++ } else {
++ /* set the S0D0 operating point */
++ fv_info->power_mode = POWER_MODE_D0;
++ tmp = fv_info->smcfs;
++ if (tmp == 0)
++ fv_info->smcfs = 78;
++ else if (tmp == 2)
++ fv_info->smcfs = 104;
++ else if (tmp == 5)
++ fv_info->smcfs = 208;
++ else
++ res = -EINVAL;
++ tmp = fv_info->sflfs;
++ if (tmp == 0)
++ fv_info->sflfs = 104;
++ else if (tmp == 1)
++ fv_info->sflfs = 156;
++ else if (tmp == 2)
++ fv_info->sflfs = 208;
++ else if (tmp == 3)
++ fv_info->sflfs = 312;
++ tmp = fv_info->hss;
++ if (tmp == 0)
++ fv_info->hss = 104;
++ else if (tmp == 1)
++ fv_info->hss = 156;
++ else if (tmp == 2)
++ fv_info->hss = 208;
++ else
++ res = -EINVAL;
++ tmp = fv_info->dmcfs;
++ if (tmp == 0)
++ fv_info->dmcfs = 26;
++ else if (tmp == 2)
++ fv_info->dmcfs = 208;
++ else if (tmp == 3)
++ fv_info->dmcfs = 260;
++ else
++ res = -EINVAL;
++ tmp = fv_info->df_clk;
++ if (tmp == 1)
++ fv_info->df_clk = fv_info->smcfs;
++ else if (tmp == 2)
++ fv_info->df_clk = fv_info->smcfs / 2;
++ else if (tmp == 3)
++ fv_info->df_clk = fv_info->smcfs / 4;
++ fv_info->empi_clk = fv_info->df_clk;
++ }
++ }
++ return res;
++}
++
++/* Get current setting, and record it in fv_info structure
++ */
++static int capture_op_info(void *driver_data, struct dvfm_md_opt *fv_info)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ int res = -EFAULT;
++ uint32_t acsr, memclkcfg;
++
++ if (fv_info) {
++ memset(fv_info, 0, sizeof(struct dvfm_md_opt));
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ fv_info->xl = (acsr >> ACCR_XL_OFFSET) & 0x1F;
++ fv_info->xn = (acsr >> ACCR_XN_OFFSET) & 0x07;
++ fv_info->smcfs = (acsr >> ACCR_SMCFS_OFFSET) & 0x07;
++ fv_info->sflfs = (acsr >> ACCR_SFLFS_OFFSET) & 0x03;
++ fv_info->hss = (acsr >> ACCR_HSS_OFFSET) & 0x03;
++ fv_info->dmcfs = (acsr >> ACCR_DMCFS_OFFSET) & 0x03;
++ fv_info->power_mode = (acsr >> ACCR_D0CS_OFFSET) & 0x01;
++ memclkcfg = __raw_readl(info->smc_base + MEMCLKCFG_OFF);
++ fv_info->df_clk = (memclkcfg >> MEMCLKCFG_DF_OFFSET) & 0x07;
++ fv_info->empi_clk = (memclkcfg >> MEMCLKCFG_EMPI_OFFSET) & 0x07;
++ res = reg2freq(info, fv_info);
++ pxa3xx_pmic_get_voltage(VCC_CORE, &fv_info->vcc_core);
++ if ((info->cpuid & 0xFFF0) == 0x6930) {
++ /* PXA935/PXA940 doesn't have VCC_SRAM */
++ fv_info->vcc_sram = 0;
++ } else {
++ pxa3xx_pmic_get_voltage(VCC_SRAM, &fv_info->vcc_sram);
++ }
++ /* TODO: the usage of struct dvfm_md_opt and struct pxa3xx_fv_info is
++ * mixed up here; better to define reg2freq(struct dvfm_md_opt *md_info,
++ * struct pxa3xx_fv_info *fv_info)
++ */
++ }
++ return res;
++}
++
++/* return the number of all ops, including user-defined ops and the boot op */
++static int get_op_num(void *driver_data, struct info_head *op_table)
++{
++ struct list_head *entry = NULL;
++ int num = 0;
++
++ if (!op_table)
++ goto out;
++ read_lock(&op_table->lock);
++ if (list_empty(&op_table->list)) {
++ read_unlock(&op_table->lock);
++ goto out;
++ }
++ list_for_each(entry, &op_table->list) {
++ num++;
++ }
++ read_unlock(&op_table->lock);
++out:
++ return num;
++}
++
++/* return op name. */
++static char *get_op_name(void *driver_data, struct op_info *p)
++{
++ struct dvfm_md_opt *q = NULL;
++ if (p == NULL)
++ return NULL;
++ q = (struct dvfm_md_opt *)p->op;
++ return q->name;
++}
++
++static int update_voltage(void *driver_data, struct dvfm_md_opt *old, struct dvfm_md_opt *new)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++
++ if (!(info->flags & PXA3xx_USE_POWER_I2C)) {
++ pxa3xx_pmic_set_voltage(VCC_CORE, new->vcc_core);
++ pxa3xx_pmic_set_voltage(VCC_SRAM, new->vcc_sram);
++ }
++ return 0;
++}
++
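++/*
++ * Enter D0CS (ring oscillator) mode: switch clocks via enter_d0cs_a(),
++ * update loops_per_jiffy, then gate the now-unused PLL(s).
++ */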
++static void pxa3xx_enter_d0cs(void *driver_data)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++
++ unsigned int reg, spll = 0;
++ uint32_t accr, mdrefr;
++
++ reg = (12 << ACCR_XL_OFFSET) | (1 << ACCR_XN_OFFSET);
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ if (reg == (accr & (ACCR_XN_MASK | ACCR_XL_MASK))) {
++ spll = 1;
++ }
++ /* clk_disable(info->lcd_clk);*/
++ enter_d0cs_a((volatile u32 *)info->clkmgr_base, (volatile u32 *)info->dmc_base);
++ pxafb_set_pcd();
++ /* clk_enable(info->lcd_clk);*/
++ /* update to D0CS LPJ, it must be updated before udelay() */
++ loops_per_jiffy = d0cs_lpj;
++ if (cpu_is_pxa930())
++ udelay(200);
++ else
++ udelay(100);
++
++ /* disable PLL */
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ if (spll) {
++ /* single PLL mode only disable System PLL */
++ accr |= (1 << ACCR_SPDIS_OFFSET);
++ } else {
++ /* Disable both System PLL and Core PLL */
++ accr |= (1 << ACCR_XPDIS_OFFSET) | (1 << ACCR_SPDIS_OFFSET);
++ }
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++
++ mdrefr = __raw_readl(info->dmc_base + MDREFR_OFF);
++ __raw_writel(mdrefr, info->dmc_base + MDREFR_OFF);
++}
++
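++/*
++ * Leave D0CS mode: re-enable the PLL(s), wait for them to lock in ACSR,
++ * then switch clocks back via exit_d0cs_a().
++ */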
++static void pxa3xx_exit_d0cs(void *driver_data)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ unsigned int spll = 0;
++ uint32_t reg, accr, acsr, mdrefr;
++
++ reg = (12 << ACCR_XL_OFFSET) | (1 << ACCR_XN_OFFSET);
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ if (reg == (accr & (ACCR_XN_MASK | ACCR_XL_MASK))) {
++ spll = 1;
++ }
++ /* enable PLL */
++ if (spll) {
++ /* single PLL mode only enable System PLL */
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ accr &= ~(1 << ACCR_SPDIS_OFFSET);
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++ do {
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ } while (acsr & (1 << ACCR_SPDIS_OFFSET));
++ } else {
++ /* enable both System PLL and Core PLL */
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ accr &= ~((1 << ACCR_XPDIS_OFFSET) |
++ (1 << ACCR_SPDIS_OFFSET));
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++ do {
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ } while (acsr & (1 << ACCR_XPDIS_OFFSET)
++ || acsr & (1 << ACCR_SPDIS_OFFSET));
++ }
++
++ /* clk_disable(info->lcd_clk);*/
++ exit_d0cs_a((volatile u32 *)info->clkmgr_base, (volatile u32 *)info->dmc_base);
++ mdrefr = __raw_readl(info->dmc_base + MDREFR_OFF);
++ __raw_writel(mdrefr, info->dmc_base + MDREFR_OFF);
++ pxafb_set_pcd();
++ /* clk_enable(info->lcd_clk);*/
++}
++
++/* Return 1 if Grayback PLL is on. */
++static int check_grayback_pll(void *driver_data)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++
++ return (__raw_readl(info->clkmgr_base + OSCC_OFF) & (1 << 17));
++}
++
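++/*
++ * Turn the Grayback PLL on or off (PXA930/PXA935/PXA940 only) by toggling
++ * OSCC[GPRM] and AGENP[GBPLL_CTRL/GBPLL_ST] and polling OSCC[GPRL] until
++ * the PLL reaches the requested state.
++ */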
++static int set_grayback_pll(void *driver_data, int lev)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ int timeout = 100, turnoff;
++ uint32_t oscc, agenp;
++
++ if ((info->cpuid & 0xFFF0) != 0x6830 && (info->cpuid & 0xFFF0) != 0x6930) {
++ /* It's not PXA930/PXA935/PXA940*/
++ return 0;
++ }
++ if (lev) {
++ /* turn on grayback PLL */
++ for (;;){
++ timeout = 100;
++ /* clear OSCC[GPRM] */
++ oscc = __raw_readl(info->clkmgr_base + OSCC_OFF);
++ oscc &= ~(1 << 18);
++ __raw_writel(oscc, info->clkmgr_base + OSCC_OFF);
++
++ /* set AGENP[GBPLL_CTRL] and AGENP[GBPLL_ST] */
++ agenp = __raw_readl(info->bpmu_base + AGENP_OFF);
++ agenp |= (3 << 28);
++ __raw_writel(agenp, info->bpmu_base + AGENP_OFF);
++
++ /* check OSCC[GPRL] */
++ do {
++ oscc = __raw_readl(info->clkmgr_base + OSCC_OFF);
++ if (--timeout == 0)
++ break;
++ } while (!(oscc & (1 << 17)));
++
++ if (timeout)
++ break;
++ }
++ } else {
++ /* turn off Grayback PLL */
++ for (;;){
++ timeout = 100;
++ /* clear AGENP[GBPLL_CTRL] and AGENP[GBPLL_ST] */
++ agenp = __raw_readl(info->bpmu_base + AGENP_OFF);
++ if (agenp & (1 << 28)) {
++ turnoff = 1;
++ agenp &= ~(3 << 28);
++ agenp |= (2 << 28);
++ __raw_writel(agenp, info->bpmu_base + AGENP_OFF);
++
++ /* check OSCC[GPRL] */
++ do {
++ oscc = __raw_readl(info->clkmgr_base + OSCC_OFF);
++ if (--timeout == 0)
++ break;
++ } while ((oscc & (1 << 17)));
++ }
++
++ if (timeout)
++ break;
++ }
++ if (turnoff) {
++ /* set OSCC[GPRM] */
++ oscc = __raw_readl(info->clkmgr_base + OSCC_OFF);
++ oscc |= (1 << 18);
++ __raw_writel(oscc, info->clkmgr_base + OSCC_OFF);
++ }
++ }
++ return 0;
++}
++
++/*
++ * Return 2 if MTS should be changed to 2.
++ * Return 1 if MTS should be changed to 1.
++ * Return 0 if MTS won't be changed.
++ * In this function, the maximum MTS is 2.
++ */
++static int check_mts(struct dvfm_md_opt *old, struct dvfm_md_opt *new)
++{
++ int ret = 0;
++ if ((old->xn == 1) && (new->xn == 2))
++ ret = 2;
++ if ((old->xn == 2) && (new->xn == 1))
++ ret = 1;
++ return ret;
++}
++
++static int set_mts(void *driver_data, int mts)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ unsigned int ascr;
++
++ ascr = __raw_readl(info->bpmu_base + ASCR_OFF);
++ ascr &= ~(3 << ASCR_MTS_OFFSET);
++ ascr |= (mts << ASCR_MTS_OFFSET);
++ __raw_writel(ascr, info->bpmu_base + ASCR_OFF);
++
++ /* wait MTS is set */
++ do {
++ ascr = __raw_readl(info->bpmu_base + ASCR_OFF);
++ }while (((ascr >> ASCR_MTS_OFFSET) & 0x3)
++ != ((ascr >> ASCR_MTS_S_OFFSET) & 0x3));
++
++ return 0;
++}
++
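++/*
++ * Prepare the dynamic memory controller for a frequency change: on D0CS
++ * entry/exit toggle the hardware frequency-change and calibration enables,
++ * otherwise select the DMC PLL divider in MDCNFG and wait for it to latch.
++ */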
++static int prepare_dmc(void *driver_data, int flag)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ int pll;
++ uint32_t mdcnfg, ddr_hcal;
++
++ if (flag == DMEMC_D0CS_ENTER) {
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++ mdcnfg |= (1 << MDCNFG_HWFREQ_OFFSET);
++ __raw_writel(mdcnfg, info->dmc_base + MDCNFG_OFF);
++
++ ddr_hcal = __raw_readl(info->dmc_base + DDR_HCAL_OFF);
++ ddr_hcal &= ~(1 << HCAL_HCEN_OFFSET);
++ __raw_writel(ddr_hcal, info->dmc_base + DDR_HCAL_OFF);
++
++ return 0;
++ } else if (flag == DMEMC_D0CS_EXIT) {
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++ mdcnfg |= (1 << MDCNFG_HWFREQ_OFFSET);
++ __raw_writel(mdcnfg, info->dmc_base + MDCNFG_OFF);
++
++ ddr_hcal = __raw_readl(info->dmc_base + DDR_HCAL_OFF);
++ ddr_hcal |= (1 << HCAL_HCEN_OFFSET);
++ __raw_writel(ddr_hcal, info->dmc_base + DDR_HCAL_OFF);
++
++ return 0;
++ } else if (flag == DMEMC_FREQ_LOW) {
++ pll = 3;
++ } else {
++ pll = 2;
++ }
++
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++ mdcnfg &= ~(3 << 28);
++ mdcnfg |= (pll << 28);
++ __raw_writel(mdcnfg, info->dmc_base + MDCNFG_OFF);
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++
++ ddr_hcal = __raw_readl(info->dmc_base + DDR_HCAL_OFF);
++ ddr_hcal |= (1 << HCAL_HCEN_OFFSET);
++ __raw_writel(ddr_hcal, info->dmc_base + DDR_HCAL_OFF);
++ ddr_hcal = __raw_readl(info->dmc_base + DDR_HCAL_OFF);
++
++ do {
++ /*pr_debug("polling MDCNFG:0x%x\n", MDCNFG);*/
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++ } while (((mdcnfg >> 28) & 0x3) != pll);
++
++ return 0;
++}
++
++static int set_dmc60(void *driver_data, int flag)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ uint32_t accr, reg;
++
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ if (flag)
++ accr |= 0x80;
++ else
++ accr &= ~0x80;
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++ /* polling ACCR */
++ do {
++ reg = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ } while ((accr & 0x80) != (reg & 0x80));
++
++ return 0;
++}
++
++/* set DF and EMPI divider */
++/* TODO: why is the DF/EMPI clock not taken as an input here? If we want to set the
++ * DF or EMPI clock to a frequency other than 52 MHz, how can we do that?
++ */
++static int set_df(void *driver_data, int smc)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ uint32_t memclkcfg;
++ int fix_empi;
++
++ if (((info->cpuid > 0x6880) && (info->cpuid <= 0x6881))
++ || ((info->cpuid >= 0x6890) && (info->cpuid <= 0x6892)))
++ /* It's PXA300 or PXA310 */
++ fix_empi = 1;
++ else
++ fix_empi = 0;
++
++ memclkcfg = __raw_readl(info->smc_base + MEMCLKCFG_OFF);
++ memclkcfg &= ~((7 << MEMCLKCFG_DF_OFFSET) | (7 << MEMCLKCFG_EMPI_OFFSET));
++ if (fix_empi) {
++ memclkcfg |= (3 << MEMCLKCFG_EMPI_OFFSET);
++ switch (smc) {
++ case 208:
++ /* divider -- 4 */
++ memclkcfg |= (3 << MEMCLKCFG_DF_OFFSET);
++ break;
++ case 104:
++ /* divider -- 2 */
++ memclkcfg |= (2 << MEMCLKCFG_DF_OFFSET);
++ break;
++ case 78:
++ /* divider -- 4 */
++ memclkcfg |= (3 << MEMCLKCFG_DF_OFFSET);
++ break;
++ }
++ } else {
++ switch (smc) {
++ case 208:
++ /* divider -- 4 */
++ memclkcfg |= (3 << MEMCLKCFG_DF_OFFSET);
++ memclkcfg |= (3 << MEMCLKCFG_EMPI_OFFSET);
++ break;
++ case 104:
++ /* divider -- 2 */
++ memclkcfg |= (2 << MEMCLKCFG_DF_OFFSET);
++ memclkcfg |= (2 << MEMCLKCFG_EMPI_OFFSET);
++ break;
++ case 78:
++ /* divider -- 4 */
++ memclkcfg |= (3 << MEMCLKCFG_DF_OFFSET);
++ memclkcfg |= (3 << MEMCLKCFG_EMPI_OFFSET);
++ break;
++ }
++ }
++ __raw_writel(memclkcfg, info->smc_base + MEMCLKCFG_OFF);
++ memclkcfg = __raw_readl(info->smc_base + MEMCLKCFG_OFF);
++
++ return 0;
++}
++
++/* TODO: suggest differentiating the operating point definition from the
++ * register info. Then we could remove *reg_new here and convert dvfm_md_opt to
++ * it in the routine. That would make it much clearer.
++ */
++static int update_hss(void *driver_data, struct dvfm_md_opt *old, struct dvfm_md_opt *new,
++ struct pxa3xx_fv_info *fv_info)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ unsigned int accr, acsr;
++
++ if (old->hss != new->hss) {
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ accr &= ~ACCR_HSS_MASK;
++ accr |= (fv_info->hss << ACCR_HSS_OFFSET);
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++ /* wait until ACSR is changed */
++ do {
++			accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++			acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++		} while ((accr & ACCR_HSS_MASK) != (acsr & ACCR_HSS_MASK));
++ /* clk_disable(info->lcd_clk);*/
++ /* set PCD just after HSS updated */
++ pxafb_set_pcd();
++ /* clk_enable(info->lcd_clk);*/
++ }
++
++ return 0;
++}
++
++static int update_bus_freq(void *driver_data, struct dvfm_md_opt *old, struct dvfm_md_opt *new)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ struct pxa3xx_fv_info fv_info;
++ uint32_t accr, acsr, mdcnfg, mask;
++ int timeout, dmcflag = 1;
++
++ freq2reg(&fv_info, new);
++ if (old->dmcfs < new->dmcfs)
++ prepare_dmc(info, DMEMC_FREQ_HIGH);
++ else if (old->dmcfs > new->dmcfs)
++ prepare_dmc(info, DMEMC_FREQ_LOW);
++ else
++ dmcflag = 0;
++ if (new->smcfs == 208 || new->smcfs == 78)
++ set_df(info, new->smcfs);
++
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ mask = 0;
++ if (old->smcfs != new->smcfs) {
++ accr &= ~ACCR_SMCFS_MASK;
++ accr |= (fv_info.smcfs << ACCR_SMCFS_OFFSET);
++ mask |= ACCR_SMCFS_MASK;
++ }
++ if (old->sflfs != new->sflfs) {
++ accr &= ~ACCR_SFLFS_MASK;
++ accr |= (fv_info.sflfs << ACCR_SFLFS_OFFSET);
++ mask |= ACCR_SFLFS_MASK;
++ }
++ if (old->dmcfs != new->dmcfs) {
++ accr &= ~ACCR_DMCFS_MASK;
++ accr |= (fv_info.dmcfs << ACCR_DMCFS_OFFSET);
++ mask |= ACCR_DMCFS_MASK;
++ }
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++
++ /* wait until ACSR is changed */
++ do {
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ } while ((accr & mask) != (acsr & mask));
++
++ if (dmcflag) {
++ timeout = 10;
++ do {
++ mdcnfg = __raw_readl(info->dmc_base + MDCNFG_OFF);
++ udelay(1);
++ if (--timeout == 0) {
++ printk(KERN_WARNING "MDCNFG[29:28] isn't zero\n");
++ break;
++ }
++ } while (mdcnfg & ( 3 << 28));
++ }
++
++ if (new->smcfs == 104) {
++ set_df(info, new->smcfs);
++ }
++
++ update_hss(info, old, new, &fv_info);
++
++ return 0;
++}
++
++static int set_freq(void *driver_data, struct dvfm_md_opt *old, struct dvfm_md_opt *new)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ int spll;
++ uint32_t accr, acsr;
++
++ /* check whether new OP is single PLL mode */
++ if ((new->xl == 0x0c) && (new->xn == 0x1))
++ spll = 1;
++ else
++ spll = 0;
++
++ /* turn on Grayback PLL */
++	if (!spll && !check_grayback_pll(info))
++		set_grayback_pll(info, 1);
++ if (check_mts(old, new) == 2)
++ set_mts(info, 2);
++
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ accr &= ~(ACCR_XL_MASK | ACCR_XN_MASK | ACCR_XSPCLK_MASK);
++ accr |= ((new->xl << ACCR_XL_OFFSET) | (new->xn << ACCR_XN_OFFSET)
++ | (3 << ACCR_XSPCLK_OFFSET));
++ __raw_writel(accr, info->clkmgr_base + ACCR_OFF);
++	/* delay at least 2 cycles of the 13MHz clock (~154ns); udelay(1) is a safe upper bound */
++ udelay(1);
++
++ if (check_mts(old, new) == 1)
++ set_mts(info, 1);
++
++ if ((new->xl == old->xl) && (new->xn != old->xn))
++ /* set T bit */
++ pxa_clkcfg_write(1);
++ else
++ /* set F bit */
++ pxa_clkcfg_write(2);
++ do {
++ accr = __raw_readl(info->clkmgr_base + ACCR_OFF);
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ } while ((accr & (ACCR_XL_MASK | ACCR_XN_MASK))
++ != (acsr & (ACCR_XL_MASK | ACCR_XN_MASK)));
++
++ udelay(1);
++ update_bus_freq(info, old, new);
++
++ /* turn off Grayback PLL */
++ if (spll)
++ set_grayback_pll(info, 0);
++ return 0;
++}
++
++static int update_freq(void *driver_data, struct dvfm_freqs *freqs)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ static struct dvfm_md_opt before_d0cs;
++ struct dvfm_md_opt old, new;
++ struct op_info *p = NULL;
++ unsigned long flags;
++ int found = 0, new_op = cur_op;
++
++ memset(&old, 0, sizeof(struct dvfm_md_opt));
++ memset(&new, 0, sizeof(struct dvfm_md_opt));
++ write_lock_irqsave(&pxa3xx_dvfm_op_list.lock, flags);
++ if (!list_empty(&pxa3xx_dvfm_op_list.list)) {
++ list_for_each_entry(p, &pxa3xx_dvfm_op_list.list, list) {
++ if (p->index == freqs->old) {
++ found++;
++ memcpy(&old, (struct dvfm_md_opt *)p->op,
++ sizeof(struct dvfm_md_opt));
++ }
++ if (p->index == freqs->new) {
++ found++;
++ memcpy(&new, (struct dvfm_md_opt *)p->op,
++ sizeof(struct dvfm_md_opt));
++ new_op = p->index;
++ }
++ if (found == 2)
++ break;
++ }
++ }
++ write_unlock_irqrestore(&pxa3xx_dvfm_op_list.lock, flags);
++ if (found != 2)
++ return -EINVAL;
++
++ if ((old.power_mode == POWER_MODE_D0)
++ && (new.power_mode == POWER_MODE_D0CS)) {
++ memcpy(&before_d0cs, &old, sizeof(struct dvfm_md_opt));
++
++ pxa3xx_enter_d0cs(info);
++ update_voltage(info, &old, &new);
++ cur_op = new_op;
++ loops_per_jiffy = new.lpj;
++ return 0;
++ } else if ((old.power_mode == POWER_MODE_D0CS)
++ && (new.power_mode == POWER_MODE_D0)) {
++ if (memcmp(&before_d0cs, &new, sizeof(struct dvfm_md_opt))) {
++ /* exit d0cs and set new operating point */
++ if ((before_d0cs.vcc_core < new.vcc_core) ||
++ (before_d0cs.vcc_sram < new.vcc_sram)) {
++ update_voltage(info, &old, &new);
++ } else {
++ update_voltage(info, &old, &before_d0cs);
++ }
++ pxa3xx_exit_d0cs(info);
++ set_freq(info, &before_d0cs, &new);
++
++ if ((before_d0cs.vcc_core > new.vcc_core) ||
++ (before_d0cs.vcc_sram > new.vcc_sram))
++ update_voltage(info, &before_d0cs, &new);
++ } else {
++ update_voltage(info, &old, &new);
++ /* exit d0cs */
++ pxa3xx_exit_d0cs(info);
++ }
++ cur_op = new_op;
++ loops_per_jiffy = new.lpj;
++ return 0;
++ } else if ((old.power_mode == POWER_MODE_D0CS)
++ && (new.power_mode == POWER_MODE_D0CS)) {
++ cur_op = new_op;
++ return 0;
++ }
++
++ if (old.core < new.core) {
++ update_voltage(info, &old, &new);
++ }
++ set_freq(info, &old, &new);
++ if (old.core > new.core) {
++ update_voltage(info, &old, &new);
++ }
++ cur_op = new_op;
++ if ((new.power_mode == POWER_MODE_D0)
++ || (new.power_mode == POWER_MODE_D0CS))
++ loops_per_jiffy = new.lpj;
++ return 0;
++}
++
++/* function of entering low power mode */
++extern void enter_lowpower_mode(int state);
++
++static void do_freq_notify(void *driver_data, struct dvfm_freqs *freqs)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++
++ dvfm_notifier_frequency(freqs, DVFM_FREQ_PRECHANGE);
++ update_freq(info, freqs);
++ dvfm_notifier_frequency(freqs, DVFM_FREQ_POSTCHANGE);
++ ispt_dvfm_op(freqs->old, freqs->new);
++
++ printk("-- dvfm: cur_op=%d\n",cur_op);
++}
++
++static void do_lowpower_notify(void *driver_data, struct dvfm_freqs *freqs, unsigned int state)
++{
++ dvfm_notifier_frequency(freqs, DVFM_FREQ_PRECHANGE);
++ //enter_lowpower_mode(state);
++ dvfm_notifier_frequency(freqs, DVFM_FREQ_POSTCHANGE);
++ ispt_power_state_d2();
++}
++
++static int check_op(void *driver_data, struct dvfm_freqs *freqs, unsigned int new,
++ unsigned int relation)
++{
++ struct op_info *p = NULL;
++ struct dvfm_md_opt *q = NULL;
++ int core, tmp_core = -1, found = 0;
++ int first_op = 0;
++
++ freqs->new = -1;
++ if (!dvfm_find_op(new, &p)) {
++ q = (struct dvfm_md_opt *)p->op;
++ core = q->core;
++ } else
++ return -EINVAL;
++ /*
++ pr_debug("%s, old:%d, new:%d, core:%d\n", __FUNCTION__, freqs->old,
++ new, core);
++ */
++ read_lock(&pxa3xx_dvfm_op_list.lock);
++ if (relation == RELATION_LOW) {
++		/* Set the lowest frequency that is higher than the specified one */
++ list_for_each_entry(p, &pxa3xx_dvfm_op_list.list, list) {
++ q = (struct dvfm_md_opt *)p->op;
++ if (core == 0) {
++ /* Lowpower mode */
++ if ((q->power_mode == POWER_MODE_D1)
++ || (q->power_mode == POWER_MODE_D2)
++ || (q->power_mode == POWER_MODE_CG)) {
++ if (!p->device && (new == p->index)) {
++ freqs->new = p->index;
++ /*
++ pr_debug("%s, found op%d\n",
++ __FUNCTION__, p->index);
++ */
++ break;
++ }
++ }
++ continue;
++ }
++
++ if (!p->device && (q->core >= core)) {
++ if (tmp_core == -1 || (tmp_core >= q->core)) {
++ /*
++ pr_debug("%s, found op%d, core:%d\n",
++ __FUNCTION__, p->index,
++ q->core);
++ */
++ if (first_op == 0)
++ first_op = p->index;
++ freqs->new = p->index;
++ tmp_core = q->core;
++ found = 1;
++ }
++ if (found && (new == p->index))
++ break;
++ }
++ }
++ if (found && (first_op == 1) && (new != p->index))
++ freqs->new = first_op;
++ } else if (relation == RELATION_HIGH) {
++		/* Set the highest frequency that is lower than the specified one */
++ list_for_each_entry(p, &pxa3xx_dvfm_op_list.list, list) {
++ q = (struct dvfm_md_opt *)p->op;
++ if (!p->device && (q->core <= core)) {
++ if (tmp_core == -1 || tmp_core < q->core) {
++ freqs->new = p->index;
++ tmp_core = q->core;
++ }
++ }
++ }
++ } else if (relation == RELATION_STICK) {
++ /* Set the specified frequency */
++ list_for_each_entry(p, &pxa3xx_dvfm_op_list.list, list) {
++ if (!p->device && (p->index == new)) {
++ freqs->new = p->index;
++ break;
++ }
++ }
++ }
++ read_unlock(&pxa3xx_dvfm_op_list.lock);
++ if (freqs->new == -1) {
++ /*
++ pr_debug("%s, Can't find op\n", __FUNCTION__);
++ pr_debug("%s, old:%d, new:%d, core:%d\n", __FUNCTION__,
++ freqs->old, new, core);
++ */
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int pxa3xx_get_freq(void *driver_data, struct op_info *p, struct op_freq *freq)
++{
++ struct dvfm_md_opt *q = (struct dvfm_md_opt *)p->op;
++ freq->cpu_freq = q->core;
++ return 0;
++}
++
++static int pxa3xx_check_active_op(void *driver_data, struct op_info *p)
++{
++ struct dvfm_md_opt *q = (struct dvfm_md_opt *)p->op;
++
++ if ((!strcmp(q->name, "D0CS")) && (boot_core_freq >= q->core))
++ return 0;
++
++ if ((!strcmp(q->name, "104M")) && (boot_core_freq >= q->core))
++ return 0;
++
++ if ((!strcmp(q->name, "156M")) && (boot_core_freq >= q->core))
++ return 0;
++
++ if ((!strcmp(q->name, "208M")) && (boot_core_freq >= q->core))
++ return 0;
++
++ if ((!strcmp(q->name, "416M")) && (boot_core_freq >= q->core))
++ return 0;
++
++ if ((!strcmp(q->name, "624M")) && (boot_core_freq >= q->core))
++ return 0;
++
++ return -EINVAL;
++}
++
++
++static int pxa3xx_set_op(void *driver_data, struct dvfm_freqs *freqs, unsigned int new,
++ unsigned int relation)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ struct dvfm_md_opt *md = NULL, *old_md = NULL;
++ struct op_info *p = NULL;
++ unsigned long flags;
++ int ret;
++ out_d0cs = 0;
++
++ local_fiq_disable();
++ local_irq_save(flags);
++ ret = dvfm_find_op(freqs->old, &p);
++ if (ret) {
++ printk("---- pxa3xx_set_op1 check_op failed to %d\n",new);
++ goto out;
++ }
++
++ memcpy(&freqs->old_info, p, sizeof(struct op_info));
++ ret = check_op(info, freqs, new, relation);
++ if (ret) {
++ printk("---- pxa3xx_set_op2 check_op failed to %d\n",new);
++ goto out;
++ }
++
++ if (!dvfm_find_op(freqs->new, &p)) {
++ memcpy(&(freqs->new_info), p, sizeof(struct op_info));
++		/* If the old op and the new op are the same, skip it.
++		 * At this point, ret should be zero.
++ */
++ if (freqs->old_info.index == freqs->new_info.index)
++ goto out;
++#ifdef DVFM_LP_SAFE
++ md = (struct dvfm_md_opt *)(freqs->new_info.op);
++ old_md = (struct dvfm_md_opt *)(freqs->old_info.op);
++ if ((old_md->power_mode == POWER_MODE_D0CS)
++ && ((md->power_mode == POWER_MODE_D1)
++ || (md->power_mode == POWER_MODE_D2))) {
++ dvfm_disable_op_name("D0CS", dvfm_dev_id);
++ out_d0cs = 1;
++ }
++
++ md = (struct dvfm_md_opt *)p->op;
++ switch (md->power_mode) {
++ case POWER_MODE_D0:
++ case POWER_MODE_D0CS:
++ do_freq_notify(info, freqs);
++ break;
++ case POWER_MODE_D1:
++ case POWER_MODE_D2:
++ case POWER_MODE_CG:
++ do_lowpower_notify(info, freqs, md->power_mode);
++ break;
++ }
++ local_irq_restore(flags);
++ local_fiq_enable();
++
++ if (out_d0cs) {
++ dvfm_enable_op_name("D0CS", dvfm_dev_id);
++ }
++#else
++ md = (struct dvfm_md_opt *)p->op;
++ switch (md->power_mode) {
++ case POWER_MODE_D0:
++ case POWER_MODE_D0CS:
++ do_freq_notify(info, freqs);
++ break;
++ case POWER_MODE_D1:
++ case POWER_MODE_D2:
++ case POWER_MODE_CG:
++ do_lowpower_notify(info, freqs, md->power_mode);
++ break;
++ }
++ local_irq_restore(flags);
++ local_fiq_enable();
++#endif
++ }
++ return 0;
++out:
++ local_irq_restore(flags);
++ local_fiq_enable();
++ return ret;
++}
++
++static int pxa3xx_request_op(void *driver_data, int index)
++{
++ struct dvfm_freqs freqs;
++ struct op_info *info = NULL;
++ struct dvfm_md_opt *md = NULL;
++ int relation, ret;
++ ret = dvfm_find_op(index, &info);
++ if (ret)
++ goto out;
++ freqs.old = cur_op;
++ freqs.new = index;
++ md = (struct dvfm_md_opt *)(info->op);
++ switch (md->power_mode) {
++ case POWER_MODE_D1:
++ case POWER_MODE_D2:
++ case POWER_MODE_CG:
++ relation = RELATION_STICK;
++ ret = pxa3xx_set_op(driver_data, &freqs, index, relation);
++ break;
++ default:
++ relation = RELATION_LOW;
++ /* only use non-low power mode as preferred op */
++ ret = pxa3xx_set_op(driver_data, &freqs, index, relation);
++ if (!ret)
++ preferred_op = index;
++ break;
++ }
++out:
++ return ret;
++}
++
++static int is_d0cs(void *driver_data)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ unsigned int acsr;
++ /* read ACSR */
++ acsr = __raw_readl(info->clkmgr_base + ACSR_OFF);
++ /* Check ring oscillator status */
++ if (acsr & (1 << 26))
++ return 1;
++ return 0;
++}
++
++/* Produce an operating point table */
++static int op_init(void *driver_data, struct info_head *op_table)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ unsigned long flags;
++ int i, index;
++ struct op_info *p = NULL, *q = NULL;
++ struct dvfm_md_opt *md = NULL, *smd = NULL;
++ struct proc_op_array *proc = NULL;
++
++ write_lock_irqsave(&op_table->lock, flags);
++ for (i = 0; i < ARRAY_SIZE(proc_op_arrays); i++){
++ if (proc_op_arrays[i].cpuid == (info->cpuid & 0xfff0)) {
++ proc = &proc_op_arrays[i];
++ break;
++ }
++ }
++ if (proc == NULL) {
++		printk(KERN_ERR "Failed to find op tables for cpu_id 0x%08x\n", info->cpuid);
++ write_unlock_irqrestore(&op_table->lock, flags);
++ return -EIO;
++ } else {
++ printk("initializing op table for %s\n", proc->cpu_name);
++ }
++ for (i = 0, index = 0; i < proc->nr_op; i++) {
++ /* PXA310 A2 or PXA935/PXA940, dmcfs 60MHz in S0D0CS mode */
++ if ((proc->op_array[i].power_mode == POWER_MODE_D0CS)
++ && (info->cpuid == 0x6892 || (info->cpuid & 0xFFF0) == 0x6930)) {
++ set_dmc60(info, 1);
++ proc->op_array[i].dmcfs = 60;
++ }
++
++ /* Set index of operating point used in idle */
++ if (proc->op_array[i].power_mode != POWER_MODE_D0) {
++ //set_idle_op(index, proc->op_array[i].power_mode);
++ }
++
++ md = (struct dvfm_md_opt *)kzalloc(sizeof(struct dvfm_md_opt),
++ GFP_KERNEL);
++ p = (struct op_info *)kzalloc(sizeof(struct op_info),
++ GFP_KERNEL);
++ p->op = (void *)md;
++ memcpy(p->op, &proc->op_array[i], sizeof(struct dvfm_md_opt));
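++		/* core clock in MHz: 13MHz base oscillator times the XL and XN
++		 * multipliers (e.g. XL * XN = 48 gives the 624M op)
++		 */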
++ md->core = 13 * md->xl * md->xn;
++ if (md->power_mode == POWER_MODE_D0CS)
++ md->core = 60;
++ p->index = index++;
++ list_add_tail(&(p->list), &(op_table->list));
++ }
++ md = (struct dvfm_md_opt *)kzalloc(sizeof(struct dvfm_md_opt),
++ GFP_KERNEL);
++ p = (struct op_info *)kzalloc(sizeof(struct op_info), GFP_KERNEL);
++ p->op = (void *)md;
++ if (capture_op_info(info, md)) {
++ printk(KERN_WARNING "Failed to get current op setting\n");
++ } else {
++ def_op = 0x5a5a; /* magic number */
++ list_for_each_entry(q, &(op_table->list), list) {
++ smd = (struct dvfm_md_opt *)q->op;
++ md->flag = smd->flag;
++ md->lpj = smd->lpj;
++ md->core = smd->core;
++ if (memcmp(md, smd, sizeof(struct dvfm_md_opt)) == 0) {
++ def_op = q->index;
++ break;
++ }
++ }
++ }
++ if (is_d0cs(driver_data))
++ md->core = 60;
++ else
++ md->core = 13 * md->xl * md->xn;
++ md->lpj = loops_per_jiffy;
++ md->flag = OP_FLAG_BOOT;
++ sprintf(md->name, "BOOT OP");
++
++ boot_core_freq = md->core;
++
++#if 0 /* disable CUSTOM OP for borq platform */
++ smd = (struct dvfm_md_opt *)kzalloc(sizeof(struct dvfm_md_opt),
++ GFP_KERNEL);
++ q = (struct op_info *)kzalloc(sizeof(struct op_info), GFP_KERNEL);
++ memcpy(q, p, sizeof(struct op_info));
++ memcpy(smd, md, sizeof(struct dvfm_md_opt));
++ smd->core = md->core;
++ smd->lpj = md->lpj;
++ smd->flag = OP_FLAG_USER_DEFINED;
++ sprintf(smd->name, "CUSTOM OP");
++ q->op = (void *)smd;
++ /* Add CUSTOM OP into op list */
++ q->index = index++;
++ list_add_tail(&q->list, &op_table->list);
++#endif
++ /* Add BOOT OP into op list */
++ p->index = index++;
++ preferred_op = p->index;
++ list_add_tail(&p->list, &op_table->list);
++ /* BOOT op */
++ if (def_op == 0x5a5a) {
++ cur_op = p->index;
++ def_op = p->index;
++ } else
++ cur_op = def_op;
++ pr_debug("%s, def_op:%d, cur_op:%d\n", __FUNCTION__, def_op, cur_op);
++
++ op_nums = proc->nr_op + 2; /* set the operating point number */
++
++ pr_debug("Current Operating Point is %d\n", cur_op);
++ dump_op_list(info, op_table, OP_FLAG_ALL);
++ write_unlock_irqrestore(&op_table->lock, flags);
++
++ return 0;
++}
++
++/*
++ * The machine operation of dvfm_enable
++ */
++static int pxa3xx_enable_dvfm(void *driver_data, int dev_id)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ struct dvfm_md_opt *md = NULL;
++ struct op_info *p = NULL;
++ int i, num;
++ num = get_op_num(info, &pxa3xx_dvfm_op_list);
++ for (i = 0; i < num; i++) {
++ if (!dvfm_find_op(i, &p)) {
++ md = (struct dvfm_md_opt *)p->op;
++ if (md->core < boot_core_freq)
++ dvfm_enable_op_name(md->name, dev_id);
++ }
++ }
++ ispt_block_dvfm(0, dev_id);
++ return 0;
++}
++
++/*
++ * The mach operation of dvfm_disable
++ */
++static int pxa3xx_disable_dvfm(void *driver_data, int dev_id)
++{
++ struct pxa3xx_dvfm_info *info = driver_data;
++ struct dvfm_md_opt *md = NULL;
++ struct op_info *p = NULL;
++ int i, num;
++ num = get_op_num(info, &pxa3xx_dvfm_op_list);
++ for (i = 0; i < num; i++) {
++ if (!dvfm_find_op(i, &p)) {
++ md = (struct dvfm_md_opt *)p->op;
++ if (md->core < boot_core_freq)
++ dvfm_disable_op_name(md->name, dev_id);
++ }
++ }
++ ispt_block_dvfm(1, dev_id);
++ return 0;
++}
++
++static int pxa3xx_enable_op(void *driver_data, int index, int relation)
++{
++ /*
++	 * Restore preferred_op, because this op is suggested by the policy maker
++	 * or the user.
++ */
++ return pxa3xx_request_op(driver_data, preferred_op);
++}
++
++static int pxa3xx_disable_op(void *driver_data, int index, int relation)
++{
++ struct dvfm_freqs freqs;
++ if (cur_op == index) {
++ freqs.old = index;
++ freqs.new = -1;
++ dvfm_set_op(&freqs, freqs.old, relation);
++ }
++ return 0;
++}
++
++static int pxa3xx_volt_show(void *driver_data, char *buf)
++{
++ struct dvfm_md_opt new;
++ int len = 0;
++
++ memset(&new, 0, sizeof(struct dvfm_md_opt));
++ pxa3xx_pmic_get_voltage(VCC_CORE, &new.vcc_core);
++ pxa3xx_pmic_get_voltage(VCC_SRAM, &new.vcc_sram);
++ len = sprintf(buf, "core voltage:%dmv, sram voltage:%dmv\n",
++ new.vcc_core, new.vcc_sram);
++ return len;
++}
++
++#ifdef CONFIG_CPU_PXA310
++static int pxa3xx_freq_show(void *driver_data, struct op_info *p, char *buf)
++{
++ struct dvfm_md_opt *q = (struct dvfm_md_opt *)p->op;
++ struct pxa3xx_fv_info info;
++
++ if (q == NULL)
++ return sprintf(buf, "unable to get frequency info\n");
++ else {
++ freq2reg(&info, q);
++ if (!info.d0cs){
++ return sprintf(buf, "current frequency is %luMhz"
++ " (XL: %lu, XN: %lu, %s) with\n"
++ " SMEM: %lu (%dMhz)\n"
++ " SRAM: %lu (%dMhz)\n"
++ " HSS: %lu (%dMhz)\n"
++ " DDR: %lu (%dMhz)\n"
++ " DFCLK: %lu (%dMhz)\n"
++ " EMPICLK: %lu (%dMhz)\n"
++ " D0CKEN_A: 0x%08x\n"
++ " D0CKEN_B: 0x%08x\n"
++ " ACCR: 0x%08x\n"
++ " ACSR: 0x%08x\n"
++ " OSCC: 0x%08x\n",
++ FREQ_CORE(info.xl, info.xn), info.xl, info.xn,
++ (info.xn != 0x1)? "Turbo Mode" : "Run Mode",
++ info.smcfs, FREQ_STMM(info.smcfs),
++ info.sflfs, FREQ_SRAM(info.sflfs),
++ info.hss, FREQ_HSS(info.hss),
++ info.dmcfs, FREQ_DDR(info.dmcfs),
++ info.df_clk, FREQ_DFCLK(info.smcfs, info.df_clk),
++ info.empi_clk, FREQ_EMPICLK(info.smcfs, info.empi_clk),
++ CKENA, CKENB, ACCR, ACSR, OSCC);
++ } else {
++ return sprintf(buf, "current frequency is 60Mhz"
++ " (ring oscillator mode) with\n"
++ " SMEM:15Mhz\n"
++ " SRAM:60Mhz\n"
++ " HSS:60Mhz\n"
++ " DDR:30Mhz\n"
++ " DFCLK:%sMhz\n"
++ " EMPICLK:%sMhz\n"
++ " D0CKEN_A: 0x%08x\n"
++ " D0CKEN_B: 0x%08x\n"
++ " ACCR: 0x%08x\n"
++ " ACSR: 0x%08x\n"
++ " OSCC: 0x%08x\n",
++ (info.df_clk == 1)?"15":
++ (info.df_clk == 2)?"7.5":
++ (info.df_clk == 3)?"3.75":"0",
++ (info.empi_clk == 1)?"15":
++ (info.empi_clk == 2)?"7.5":
++ (info.empi_clk == 3)?"3.75":"0",
++ CKENA, CKENB, ACCR, ACSR, OSCC);
++ }
++
++
++ }
++}
++#endif
++
++#ifdef CONFIG_PXA3xx_DVFM_STATS
++/* Convert ticks from 32K timer to microseconds */
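++/* 10^6 / 32768 = 5^6 / 2^9, hence the multiply by five six times and the
++ * right shift by 9 below (roughly ticks * 30.5)
++ */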
++static unsigned int pxa3xx_ticks_to_usec(unsigned int ticks)
++{
++ return (ticks * 5 * 5 * 5 * 5 * 5 * 5) >> 9;
++}
++
++static unsigned int pxa3xx_ticks_to_sec(unsigned int ticks)
++{
++ return (ticks >> 15);
++}
++
++static unsigned int pxa3xx_read_time(void)
++{
++ return OSCR4;
++}
++
++/* It's invoked by PM functions.
++ * PM functions can store the accurate time of entering/exiting low power
++ * mode.
++ */
++int calc_switchtime(unsigned int end, unsigned int start)
++{
++ switch_lowpower_before = end;
++ switch_lowpower_after = start;
++ return 0;
++}
++
++static int pxa3xx_stats_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct dvfm_freqs *freqs = (struct dvfm_freqs *)data;
++ struct op_info *info = &(freqs->new_info);
++ struct dvfm_md_opt *md = NULL;
++ unsigned int ticks;
++
++ ticks = pxa3xx_read_time();
++ md = (struct dvfm_md_opt *)(info->op);
++ if (md->power_mode == POWER_MODE_D0 ||
++ md->power_mode == POWER_MODE_D0CS) {
++ switch (val) {
++ case DVFM_FREQ_PRECHANGE:
++ calc_switchtime_start(freqs->old, freqs->new, ticks);
++ break;
++ case DVFM_FREQ_POSTCHANGE:
++ /* Calculate the costed time on switching frequency */
++ calc_switchtime_end(freqs->old, freqs->new, ticks);
++ dvfm_add_event(freqs->old, CPU_STATE_RUN,
++ freqs->new, CPU_STATE_RUN);
++ dvfm_add_timeslot(freqs->old, CPU_STATE_RUN);
++ mspm_add_event(freqs->old, CPU_STATE_RUN);
++ break;
++ }
++ } else if (md->power_mode == POWER_MODE_D1 ||
++ md->power_mode == POWER_MODE_D2 ||
++ md->power_mode == POWER_MODE_CG) {
++ switch (val) {
++ case DVFM_FREQ_PRECHANGE:
++ calc_switchtime_start(freqs->old, freqs->new, ticks);
++ /* Consider lowpower mode as idle mode */
++ dvfm_add_event(freqs->old, CPU_STATE_RUN,
++ freqs->new, CPU_STATE_IDLE);
++ dvfm_add_timeslot(freqs->old, CPU_STATE_RUN);
++ mspm_add_event(freqs->old, CPU_STATE_RUN);
++ break;
++ case DVFM_FREQ_POSTCHANGE:
++			/* switch_lowpower_before and switch_lowpower_after
++			 * are updated in calc_switchtime(), which is
++			 * invoked by the PM code.
++ */
++ calc_switchtime_end(freqs->old, freqs->new,
++ switch_lowpower_before);
++ calc_switchtime_start(freqs->new, freqs->old,
++ switch_lowpower_after);
++ calc_switchtime_end(freqs->new, freqs->old,
++ ticks);
++ dvfm_add_event(freqs->new, CPU_STATE_IDLE,
++ freqs->old, CPU_STATE_RUN);
++ dvfm_add_timeslot(freqs->new, CPU_STATE_IDLE);
++ mspm_add_event(freqs->new, CPU_STATE_IDLE);
++ break;
++ }
++ }
++ return 0;
++}
++#else
++#define pxa3xx_ticks_to_usec NULL
++#define pxa3xx_ticks_to_sec NULL
++#define pxa3xx_read_time NULL
++#endif
++
++static struct dvfm_driver pxa3xx_driver = {
++ .count = get_op_num,
++ .set = pxa3xx_set_op,
++ .dump = dump_op,
++ .name = get_op_name,
++ .request_set = pxa3xx_request_op,
++ .enable_dvfm = pxa3xx_enable_dvfm,
++ .disable_dvfm = pxa3xx_disable_dvfm,
++ .enable_op = pxa3xx_enable_op,
++ .disable_op = pxa3xx_disable_op,
++ .volt_show = pxa3xx_volt_show,
++#ifdef CONFIG_CPU_PXA310
++ .freq_show = pxa3xx_freq_show,
++#endif
++ .ticks_to_usec = pxa3xx_ticks_to_usec,
++ .ticks_to_sec = pxa3xx_ticks_to_sec,
++ .read_time = pxa3xx_read_time,
++ .get_freq = pxa3xx_get_freq,
++ .check_active_op = pxa3xx_check_active_op,
++};
++
++#ifdef CONFIG_PM
++static int pxa3xx_freq_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ current_op = cur_op;
++ dvfm_request_op(1);
++ return 0;
++}
++
++static int pxa3xx_freq_resume(struct platform_device *pdev)
++{
++ dvfm_request_op(current_op);
++ return 0;
++}
++#else
++#define pxa3xx_freq_suspend NULL
++#define pxa3xx_freq_resume NULL
++#endif
++
++static void pxa3xx_poweri2c_init(struct pxa3xx_dvfm_info *info)
++{
++ uint32_t avcr, svcr, cvcr, pcfr, pvcr;
++
++ if ((info->flags & PXA3xx_USE_POWER_I2C) &&
++ ((info->cpuid & 0xfff0) == 0x6930)) {
++ /* set AVCR for PXA935/PXA940:
++ * level 0: 1250mv, 0x15
++ * level 1: 1250mv, 0x15
++ * level 2: 1250mv, 0x15
++ * level 3: 1250mv, 0x15
++ */
++ avcr = __raw_readl(info->spmu_base + AVCR_OFF);
++ avcr &= 0xE0E0E0E0;
++ avcr |= (0x15 << 24) | (0x15 << 16) | (0x15 << 8) | 0x15;
++ __raw_writel(avcr, info->spmu_base + AVCR_OFF);
++ avcr = __raw_readl(info->spmu_base + AVCR_OFF);
++
++ /* set delay */
++ pcfr = __raw_readl(info->spmu_base + PCFR_OFF);
++ pcfr &= 0x000FFFFF;
++ pcfr |= 0xCCF00000;
++ /* Disable pullup/pulldown in PWR_SCL and PWR_SDA */
++ pcfr |= 0x04;
++ __raw_writel(pcfr, info->spmu_base + PCFR_OFF);
++ pcfr = __raw_readl(info->spmu_base + PCFR_OFF);
++
++ /* enable FVE,PVE,TVE bit */
++ __raw_writel(0xe0500034, info->spmu_base + PVCR_OFF);
++ } else if (info->flags & PXA3xx_USE_POWER_I2C) {
++ /* set AVCR for PXA300/PXA310/PXA320/PXA930
++ * level 0: 1000mv, 0x0b
++ * level 1: 1100mv, 0x0f
++ * level 2: 1375mv, 0x1a
++ * level 3: 1400mv, 0x1b
++ */
++ avcr = __raw_readl(info->spmu_base + AVCR_OFF);
++ avcr &= 0xE0E0E0E0;
++ /* PXA930 B0(cpuid 0x6835) requires special setting */
++ if (info->cpuid == 0x6835)
++ avcr |= (0x1b << 24) | (0x1a << 16) | (0x0f << 8) | 0xb;
++ else
++ avcr |= (0x0f << 24) | (0x1a << 16) | (0x0f << 8) | 0xb;
++ __raw_writel(avcr, info->spmu_base + AVCR_OFF);
++ avcr = __raw_readl(info->spmu_base + AVCR_OFF);
++ /* set SVCR:
++ * level 0: 1100mv, 0x0f
++ * level 1: 1200mv, 0x13
++ * level 2: 1400mv, 0x1b
++ * level 3: 1400mv, 0x1b
++ */
++ svcr = __raw_readl(info->spmu_base + SVCR_OFF);
++ svcr &= 0xE0E0E0E0;
++ if (info->cpuid == 0x6835)
++ svcr |= (0x1b << 24) | (0x1b << 16) | (0x13 << 8) | 0xf;
++ else
++ svcr |= (0x0f << 24) | (0x1b << 16) | (0x13 << 8) | 0xf;
++ __raw_writel(svcr, info->spmu_base + SVCR_OFF);
++ svcr = __raw_readl(info->spmu_base + SVCR_OFF);
++ /* set CVCR:
++ * level 0: 925mv, 0x08
++ * level 1: 1250mv, 0x15
++ * level 2: 1375mv, 0x1a
++ * level 3: 1400mv, 0x1b
++ */
++ cvcr = __raw_readl(info->spmu_base + CVCR_OFF);
++ cvcr &= 0xE0E0E0E0;
++ if (info->cpuid == 0x6835)
++ cvcr |= (0x1b << 24) | (0x1a << 16) | (0x15 << 8) | 0x08;
++ else
++ cvcr |= (0x0f << 24) | (0x1a << 16) | (0x15 << 8) | 0x08;
++ __raw_writel(cvcr, info->spmu_base + CVCR_OFF);
++ cvcr = __raw_readl(info->spmu_base + CVCR_OFF);
++
++ /* set delay */
++ pcfr = __raw_readl(info->spmu_base + PCFR_OFF);
++ pcfr &= 0x000FFFFF;
++ pcfr |= 0xCCF00000;
++ /* Disable pullup/pulldown in PWR_SCL and PWR_SDA */
++ pcfr |= 0x04;
++ __raw_writel(pcfr, info->spmu_base + PCFR_OFF);
++ pcfr = __raw_readl(info->spmu_base + PCFR_OFF);
++
++ /* enable FVE,PVE,TVE bit */
++ __raw_writel(0xe0500034, info->spmu_base + PVCR_OFF);
++ } else {
++ /* disable FVE,PVE,TVE,FVC bit */
++ pvcr = __raw_readl(info->spmu_base + PVCR_OFF);
++ pvcr &= 0x0fffffff;
++ __raw_writel(pvcr, info->spmu_base + PVCR_OFF);
++ }
++}
++
++int gpio_reset_work_around(void)
++{
++ dvfm_disable_op_name("624M", dvfm_dev_id);
++ dvfm_disable_op_name("416M", dvfm_dev_id);
++ dvfm_disable_op_name("208M", dvfm_dev_id);
++ return 0;
++}
++
++static int pxa3xx_freq_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct pxa3xx_freq_mach_info *pdata;
++ struct pxa3xx_dvfm_info *info;
++ int rc;
++
++ /* initialize the information necessary to frequency/voltage change operation */
++ pdata = pdev->dev.platform_data;
++ info = kzalloc(sizeof(struct pxa3xx_dvfm_info), GFP_KERNEL);
++ info->flags = pdata->flags;
++ info->cpuid = read_cpuid(0) & 0xFFFF;
++
++ //info->lcd_clk = clk_get(&pxa_device_fb.dev, "LCDCLK");
++ //if (IS_ERR(info->lcd_clk)) goto err;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clkmgr_regs");
++ if (!res) goto err;
++ info->clkmgr_base = ioremap(res->start, res->end - res->start + 1);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spmu_regs");
++ if (!res) goto err;
++ info->spmu_base = ioremap(res->start, res->end - res->start + 1);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bpmu_regs");
++ if (!res) goto err;
++ info->bpmu_base = ioremap(res->start, res->end - res->start + 1);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc_regs");
++ if (!res) goto err;
++ info->dmc_base = ioremap(res->start, res->end - res->start + 1);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc_regs");
++ if (!res) goto err;
++ info->smc_base = ioremap(res->start, res->end - res->start + 1);
++
++ pxa3xx_driver.priv = info;
++
++ pxa3xx_poweri2c_init(info);
++ op_init(info, &pxa3xx_dvfm_op_list);
++
++ return dvfm_register_driver(&pxa3xx_driver, &pxa3xx_dvfm_op_list);
++err:
++ printk("pxa3xx_dvfm init failed\n");
++ return -EIO;
++}
++
++static int pxa3xx_freq_remove(struct platform_device *pdev)
++{
++ kfree(pxa3xx_driver.priv);
++ return dvfm_unregister_driver(&pxa3xx_driver);
++}
++
++static struct platform_driver pxa3xx_freq_driver = {
++ .driver = {
++ .name = "pxa3xx-freq",
++ },
++ .probe = pxa3xx_freq_probe,
++ .remove = pxa3xx_freq_remove,
++#ifdef CONFIG_PM
++ //.suspend = pxa3xx_freq_suspend,
++ //.resume = pxa3xx_freq_resume,
++#endif
++};
++
++
++static int __init pxa3xx_freq_init(void)
++{
++ int ret;
++ ret = platform_driver_register(&pxa3xx_freq_driver);
++ if (ret)
++ goto out;
++#ifdef CONFIG_PXA3xx_DVFM_STATS
++ ret = dvfm_register_notifier(&notifier_freq_block,
++ DVFM_FREQUENCY_NOTIFIER);
++#endif
++ ret = dvfm_register("DVFM", &dvfm_dev_id);
++out:
++ return ret;
++}
++
++static void __exit pxa3xx_freq_exit(void)
++{
++#ifdef CONFIG_PXA3xx_DVFM_STATS
++ dvfm_unregister_notifier(&notifier_freq_block,
++ DVFM_FREQUENCY_NOTIFIER);
++#endif
++ dvfm_unregister("DVFM", &dvfm_dev_id);
++ platform_driver_unregister(&pxa3xx_freq_driver);
++}
++
++module_init(pxa3xx_freq_init);
++module_exit(pxa3xx_freq_exit);
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pxa3xx_dvfm_ll.S kernel/arch/arm/mach-pxa/pxa3xx_dvfm_ll.S
+--- linux-2.6.32/arch/arm/mach-pxa/pxa3xx_dvfm_ll.S 2009-12-13 13:00:42.108609192 +0200
++++ kernel/arch/arm/mach-pxa/pxa3xx_dvfm_ll.S 2009-12-12 16:09:26.482948915 +0200
+@@ -0,0 +1,261 @@
++@
++@ This program is free software; you can redistribute it and/or modify
++@ it under the terms of the GNU General Public License as published by
++@ the Free Software Foundation; either version 2 of the License, or
++@ (at your option) any later version.
++@
++@ This program is distributed in the hope that it will be useful,
++@ but WITHOUT ANY WARRANTY; without even the implied warranty of
++@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++@ GNU General Public License for more details.
++@
++@ You should have received a copy of the GNU General Public License
++@ along with this program; if not, write to the Free Software
++@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++@
++@
++@ FILENAME: pxa3xx_dvfm_ll.S
++@
++@ PURPOSE: Provides low level DVFM primitive functions written specifically
++@ for the Monahans/Zylonite processor/platform.
++@
++@******************************************************************************
++
++
++@
++@ List of primitive functions in this module:
++@
++ .global enter_d0cs_a
++ .global exit_d0cs_a
++ .global pxa_clkcfg_read
++ .global pxa_clkcfg_write
++
++.equ CLKMGR_ACCR_OFFSET,0x0000
++.equ CLKMGR_ACSR_OFFSET,0x0004
++
++.equ DMEMC_MDCNFG_OFFSET, 0x0000
++.equ DMEMC_DDRHCAL_OFFSET,0x0060
++
++ .text
++
++@
++@
++@ UINT32 enter_d0cs_a
++@
++@
++@ Description:
++@	Put the system into D0CS mode.
++@
++@ Input Parameters:
++@ r0 - arg1, the address of Clock Manager Controller
++@ r1 - arg2, the address of Dynamic Memory controller
++@ Returns:
++@ r0 - success (0) or failure(1)
++@
++@ Registers Modified:
++@ ACCR, MDCNFG, DDR_HCAL
++@ General Purpose Registers Modified: r3, r4
++@
++@ NOTE:
++@
++
++enter_d0cs_a:
++ stmfd sp!, {r3, r4, lr}
++ @
++ @ return directly if current mode is D0CS already
++ @
++ ldr r3, [r0, #CLKMGR_ACSR_OFFSET] @ load ACSR
++ tst r3, #0x04000000
++ movne r0, #0
++ bne 6f
++0:
++ @
++ @ set DMEMC.MDCFG[29]
++ @
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET] @ get MDCNFG
++ orr r3, r3, #0x20000000 @ Set DMEMC.MDCNFG[29].
++ str r3, [r1, #DMEMC_MDCNFG_OFFSET] @ load MDCNFG
++1:
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET] @ ensure DMEMC.MDCNFG[29] bit is written
++ tst r3, #0x20000000
++ beq 1b
++
++ @
++ @ clear DMEMC.DDR_HCAL[31]
++ @
++ ldr r3, [r1, #DMEMC_DDRHCAL_OFFSET] @ get DDR_HCAL
++	bic	r3, r3, #0x80000000		@ Ensure DDR_HCAL[31] is clear
++ str r3, [r1, #DMEMC_DDRHCAL_OFFSET] @ load DDR_HCAL
++2:
++	ldr	r3, [r1, #DMEMC_DDRHCAL_OFFSET]	@ Ensure DDR_HCAL[31] is clear
++ tst r3, #0x80000000
++ bne 2b
++
++ @
++ @ set ACCR[D0CS] bit
++ @
++ ldr r3, [r0, #CLKMGR_ACCR_OFFSET] @ get ACCR
++ orr r3, r3, #0x04000000 @ set D0CS bit in ACCR
++ str r3, [r0, #CLKMGR_ACCR_OFFSET] @ load ACCR
++3:
++ ldr r3, [r0, #CLKMGR_ACCR_OFFSET] @ ensure D0CS bit is written
++ tst r3, #0x04000000
++ beq 3b
++
++ @
++ @ enter D0CS mode
++ @
++ mov r4, #5 @ r4: power mode
++ b enterd0cs @ skip the garbage before .align 5
++ .align 5
++enterd0cs:
++ mcr p14, 0, r4, c7, c0, 0 @ enter D0CS mode
++4:	@ wait until the system has really entered D0CS
++ ldr r3, [r0, #CLKMGR_ACSR_OFFSET] @ load ACSR
++ tst r3, #0x04000000
++ beq 4b
++5: @ wait for DMEMC.MDCNFG[29] clear
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET]
++ tst r3, #0x20000000
++ bne 5b
++
++6:
++ @
++ @ return
++ @
++ mov r0, #0
++ ldmfd sp!, {r3, r4, pc} @ return
++
++@
++@
++@ UINT32 exit_d0cs_a
++@
++@
++@ Description:
++@	Let the system exit D0CS mode.
++@
++@ r0 - arg1, the address of Clock Manager Controller
++@ r1 - arg2, the address of Dynamic Memory controller
++@ Returns:
++@ r0 - success (0) or failure(1)
++@
++@ Registers Modified:
++@ ACCR, MDCNFG, DDR_HCAL
++@ General Purpose Registers Modified: r3, r4
++@
++@ NOTE:
++@
++
++exit_d0cs_a:
++ stmfd sp!, {r3,r4,lr}
++ @
++ @ return directly if current mode is not D0CS
++ @
++ ldr r3, [r0, #CLKMGR_ACSR_OFFSET] @ load ACSR
++ tst r3, #0x04000000
++ beq 6f
++0:
++ @
++ @ set DMEMC.MDCFG[29]
++ @
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET] @ get MDCNFG
++ orr r3, r3, #0x20000000 @ Set DMEMC.MDCNFG[29].
++ str r3, [r1, #DMEMC_MDCNFG_OFFSET] @ load MDCNFG
++1:
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET] @ ensure DMEMC.MDCNFG[29] bit is written
++ tst r3, #0x20000000
++ beq 1b
++
++ @
++ @ set DMEMC.DDR_HCAL[31]
++ @
++ ldr r3, [r1, #DMEMC_DDRHCAL_OFFSET] @ get DDR_HCAL
++	orr	r3, r3, #0x80000000		@ Ensure DDR_HCAL[31] is set
++ str r3, [r1, #DMEMC_DDRHCAL_OFFSET] @ load DDR_HCAL
++2:
++	ldr	r3, [r1, #DMEMC_DDRHCAL_OFFSET]	@ Ensure DDR_HCAL[31] is set
++ tst r3, #0x80000000
++ beq 2b
++
++ @
++ @ clear ACCR[D0CS] bit
++ @
++ ldr r3, [r0, #CLKMGR_ACCR_OFFSET] @ get ACCR
++ bic r3, r3, #0x04000000 @ clear D0CS bit in ACCR
++ str r3, [r0, #CLKMGR_ACCR_OFFSET] @ load ACCR
++3:
++ ldr r3, [r0, #CLKMGR_ACCR_OFFSET] @ ensure D0CS bit is clear
++ tst r3, #0x04000000
++ bne 3b
++
++ @
++ @ exit D0CS mode
++ @
++ mov r4, #5 @ r4: power mode
++ b exitd0cs @ skip the garbage before .align 5
++ .align 5
++exitd0cs:
++ mcr p14, 0, r4, c7, c0, 0 @ exit D0CS mode
++4:	@ wait until the system has really exited D0CS
++ ldr r3, [r0, #CLKMGR_ACSR_OFFSET] @ load ACSR
++ tst r3, #0x04000000
++ bne 4b
++5: @ wait for DMEMC.MDCNFG[29] clear
++ ldr r3, [r1, #DMEMC_MDCNFG_OFFSET]
++ tst r3, #0x20000000
++ bne 5b
++6:
++ @
++ @ return
++ @
++ mov r0, #0
++ ldmfd sp!, {r3,r4,pc} @ return
++
++@
++@ UINT32 pxa_clkcfg_read
++@
++@ Description:
++@ This routine reads the CLKCFG register via Coprocessor 14.
++@
++@ Input Parameters:
++@
++@ Returns:
++@ r0 - clkcfg value
++@
++@ Registers Modified:
++@ CoProcessor Register Modified: None
++@ General Purpose Registers Modified: None
++@
++@
++
++pxa_clkcfg_read:
++ mrc p14, 0, r0, c6, c0, 0 @ Read clkcfg
++ bx lr @ return
++
++
++
++@
++@ void pxa_clkcfg_write
++@
++@ Description:
++@ This routine writes to the designated ClkCFG register via Coprocessor 14.
++@
++@ Input Parameters:
++@ r0 - arg1 - Value to write to ClkCFG register
++@
++
++@ Returns:
++@ None
++@
++@ Registers Modified:
++@ CoProcessor Register Modified: ClkCFG Register
++@ General Purpose Registers Modified: None
++@
++@ NOTE
++@ Error checking not included
++@
++
++pxa_clkcfg_write:
++ mcr p14, 0, r0, c6, c0, 0 @ Write ClkCFG
++ bx lr @ return
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/pxa3xx_pmic.c kernel/arch/arm/mach-pxa/pxa3xx_pmic.c
+--- linux-2.6.32/arch/arm/mach-pxa/pxa3xx_pmic.c 2009-12-13 13:00:47.651947246 +0200
++++ kernel/arch/arm/mach-pxa/pxa3xx_pmic.c 2009-12-12 16:09:26.482948915 +0200
+@@ -0,0 +1,394 @@
++/*
++ * Monahans PMIC abstraction layer
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL), Version 2, June 1991, available at http://www.fsf.org/copyleft/gpl.html
++
++ * (C) Copyright 2007 Marvell International Ltd.
++ * All Rights Reserved
++ */
++
++#include <mach/pxa3xx_pmic.h>
++
++#include <mach/mfp.h>
++static struct pmic_ops *pxa3xx_pmic_ops;
++
++#ifdef DEBUG
++/* calculate the elapsed time on operating PMIC */
++static unsigned int start_time, end_time;
++void start_calc_time(void)
++{
++ start_time = OSCR;
++}
++
++void end_calc_time(void)
++{
++ unsigned int time;
++	end_time = OSCR;
++ time = (end_time - start_time) * 100 / 325;
++
++ pr_debug("\n%s:\t:%dus\n", __func__, time);
++}
++#else
++void start_calc_time(void) {}
++void end_calc_time(void) {}
++#endif
++
++void pmic_set_ops(struct pmic_ops *ops)
++{
++	printk("pmic_set_ops: %p\n", ops);
++ if (pxa3xx_pmic_ops != NULL) {
++ printk(KERN_ERR "set pmic_ops when pmic_ops is not NULL\n");
++ return;
++ }
++ pxa3xx_pmic_ops = ops;
++ INIT_LIST_HEAD(&pxa3xx_pmic_ops->list);
++ spin_lock_init(&pxa3xx_pmic_ops->cb_lock);
++}
++
++/*****************************************************************************
++ * Operation of PMIC *
++ *****************************************************************************/
++int check_pmic_ops(void)
++{
++ if (!pxa3xx_pmic_ops) {
++ printk(KERN_WARNING "No pmic_ops registered!\n");
++ return -EINVAL;
++ } else
++ return 0;
++}
++
++int pxa3xx_pmic_get_voltage(int cmd, int *pval)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->get_voltage)
++ return pxa3xx_pmic_ops->get_voltage(cmd, pval);
++ else
++ return -EINVAL;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_get_voltage);
++
++int pxa3xx_pmic_set_voltage(int cmd, int val)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->set_voltage)
++ return pxa3xx_pmic_ops->set_voltage(cmd, val);
++ else
++ return -EINVAL;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_set_voltage);
++
++int pxa3xx_pmic_check_voltage(int cmd)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->check_voltage)
++ return pxa3xx_pmic_ops->check_voltage(cmd);
++ else
++ return -EINVAL;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_check_voltage);
++
++int pxa3xx_pmic_enable_voltage(int cmd, int enable)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->enable_voltage)
++ return pxa3xx_pmic_ops->enable_voltage(cmd, enable);
++ else
++ return -EINVAL;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_enable_voltage);
++
++int pxa3xx_pmic_enable_led(int cmd, int enable)
++{
++ int ret;
++
++	ret = check_pmic_ops();
++	if (ret < 0)
++		return ret;
++
++	if (pxa3xx_pmic_ops->enable_led)
++ return pxa3xx_pmic_ops->enable_led(cmd, enable);
++ else
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(pxa3xx_pmic_enable_led);
++
++int pxa3xx_pmic_is_vbus_assert(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0) /* If illegal pmic_ops, always no vbus activity */
++ return 0;
++
++ if (pxa3xx_pmic_ops->is_vbus_assert)
++ return pxa3xx_pmic_ops->is_vbus_assert();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_vbus_assert);
++
++int pxa3xx_pmic_is_avbusvld(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0) /* If illegal pmic_ops, always no A vbus valid */
++ return 0;
++
++ if (pxa3xx_pmic_ops->is_avbusvld)
++ return pxa3xx_pmic_ops->is_avbusvld();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_avbusvld);
++
++int pxa3xx_pmic_is_asessvld(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0) /* If illegal pmic_ops, always no A assert valid */
++ return 0;
++
++ if (pxa3xx_pmic_ops->is_asessvld)
++ return pxa3xx_pmic_ops->is_asessvld();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_asessvld);
++
++int pxa3xx_pmic_is_bsessvld(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0) /* If illegal pmic_ops, always no B assert valid */
++ return 0;
++
++ if (pxa3xx_pmic_ops->is_bsessvld)
++ return pxa3xx_pmic_ops->is_bsessvld();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_bsessvld);
++
++int pxa3xx_pmic_is_srp_ready(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0) /* If illegal pmic_ops, always no SRP detect */
++ return 0;
++
++ if (pxa3xx_pmic_ops->is_srp_ready)
++ return pxa3xx_pmic_ops->is_srp_ready();
++
++ return 0;
++
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_srp_ready);
++
++int pxa3xx_pmic_set_pump(int enable)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->set_pump)
++ return pxa3xx_pmic_ops->set_pump(enable);
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_set_pump);
++
++int pxa3xx_pmic_set_vbus_supply(int enable, int srp)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->set_vbus_supply)
++ return pxa3xx_pmic_ops->set_vbus_supply(enable, srp);
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_set_vbus_supply);
++
++int pxa3xx_pmic_set_usbotg_a_mask(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->set_usbotg_a_mask)
++ return pxa3xx_pmic_ops->set_usbotg_a_mask();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_set_usbotg_a_mask);
++
++int pxa3xx_pmic_set_usbotg_b_mask(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->set_usbotg_b_mask)
++ return pxa3xx_pmic_ops->set_usbotg_b_mask();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_set_usbotg_b_mask);
++
++int pxa3xx_pmic_is_onkey_assert(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->is_onkey_assert)
++ return pxa3xx_pmic_ops->is_onkey_assert();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_onkey_assert);
++
++/* Register pmic callback */
++int pmic_callback_register(unsigned long event,
++ void (*func)(unsigned long event))
++{
++ int ret;
++ unsigned long flags;
++ struct pmic_callback *pmic_cb;
++
++ might_sleep();
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ pmic_cb = kzalloc(sizeof(*pmic_cb), GFP_KERNEL);
++ if (!pmic_cb)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&pmic_cb->list);
++ pmic_cb->event = event;
++ pmic_cb->func = func;
++
++ spin_lock_irqsave(&pxa3xx_pmic_ops->cb_lock, flags);
++ list_add(&pmic_cb->list, &pxa3xx_pmic_ops->list);
++ spin_unlock_irqrestore(&pxa3xx_pmic_ops->cb_lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(pmic_callback_register);
++
++/* Unregister pmic callback */
++int pmic_callback_unregister(unsigned long event,
++ void (*func)(unsigned long event))
++{
++ unsigned long flags;
++ struct pmic_callback *pmic_cb, *next;
++
++ spin_lock_irqsave(&pxa3xx_pmic_ops->cb_lock, flags);
++ list_for_each_entry_safe(pmic_cb, next, &pxa3xx_pmic_ops->list, list) {
++ if ((pmic_cb->event == event) && (pmic_cb->func == func)) {
++ list_del_init(&pmic_cb->list);
++ kfree(pmic_cb);
++ }
++ }
++ spin_unlock_irqrestore(&pxa3xx_pmic_ops->cb_lock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(pmic_callback_unregister);
++
++int pmic_event_handle(unsigned long event)
++{
++ int ret;
++ unsigned long flags;
++ struct pmic_callback *pmic_cb;
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ spin_lock_irqsave(&pxa3xx_pmic_ops->cb_lock, flags);
++ list_for_each_entry(pmic_cb, &pxa3xx_pmic_ops->list, list) {
++ spin_unlock_irqrestore(&pxa3xx_pmic_ops->cb_lock, flags);
++		/* event is a bit-mask parameter, so a bitwise AND is needed here as a filter */
++ if ((pmic_cb->event & event) && (pmic_cb->func))
++ pmic_cb->func(event);
++ spin_lock_irqsave(&pxa3xx_pmic_ops->cb_lock, flags);
++ }
++ spin_unlock_irqrestore(&pxa3xx_pmic_ops->cb_lock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(pmic_event_handle);
++
++
++int px3xx_pmic_event_enable(unsigned long event, int enable)
++{
++ int ret;
++ u8 val;
++ unsigned long flags;
++ struct pmic_callback *pmic_cb;
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++ printk("pxa pmic event enable 11\n");
++ if(pxa3xx_pmic_ops->enable_event)
++ {
++ printk("pxa pmic event enable 22\n");
++ return pxa3xx_pmic_ops->enable_event(event, enable);
++ }
++ else
++ return -EINVAL;
++}
++EXPORT_SYMBOL(px3xx_pmic_event_enable);
++
++int pxa3xx_pmic_is_hookswitch_assert(void)
++{
++ int ret;
++
++ ret = check_pmic_ops();
++ if (ret < 0)
++ return ret;
++
++ if (pxa3xx_pmic_ops->is_hookswitch_assert)
++ return pxa3xx_pmic_ops->is_hookswitch_assert();
++
++ return 0;
++}
++EXPORT_SYMBOL(pxa3xx_pmic_is_hookswitch_assert);
++
+diff -ur linux-2.6.32/arch/arm/mach-pxa/sgh_i780_i900.c kernel/arch/arm/mach-pxa/sgh_i780_i900.c
+--- linux-2.6.32/arch/arm/mach-pxa/sgh_i780_i900.c 2009-12-13 13:00:53.329024629 +0200
++++ kernel/arch/arm/mach-pxa/sgh_i780_i900.c 2009-12-12 16:09:26.486282481 +0200
+@@ -0,0 +1,618 @@
++/**
++ * Support for the PXA311 and PXA312 based Samsung SGH devices
++ * m480, i780, i900, i904, i908, i910
++ *
++ * Copyright (C) 2009 Sacha Refshauge <xsacha@gmail.com>
++ * Copyright (C) 2009 Stefan Schmidt <stefan@datenfreihafen.org>
++ * Copyright (C) 2009 Mustafa Ozsakalli <ozsakalli@hotmail.com>
++ *
++ * Based on zylonite.c Copyright (C) 2006 Marvell International Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/pwm_backlight.h>
++#include <linux/power_supply.h>
++#include <linux/pda_power.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/libertas_spi.h>
++#include <../drivers/staging/android/timed_gpio.h>
++
++#include <plat/i2c.h>
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++#include <mach/hardware.h>
++#include <mach/pxafb.h>
++#include <mach/audio.h>
++#include <mach/mmc.h>
++#include <mach/udc.h>
++#include <mach/ohci.h>
++#include <mach/pxa27x-udc.h>
++#include <mach/pxa27x_keypad.h>
++#include <mach/pxa2xx_spi.h>
++#include <mach/pxa3xx-regs.h>
++#include <mach/mfp-pxa300.h>
++#if defined(CONFIG_PXA_DVFM)
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_dvfm.h>
++#include <mach/pmu.h>
++#endif
++
++#include <mach/sgh_msm6k.h>
++
++#include "devices.h"
++#include "generic.h"
++
++#define SGH_BATT_I2C_SLAVE_ADDRESS 0x34
++
++#define GPIO09_SGH_LED_GREEN 9
++#define GPIO23_SGH_TOUCHSCREEN 23
++#define GPIO71_SGH_LED_BLUE 71
++#define GPIO79_SGH_LED_VIBRATE 79
++#define GPIO88_SGH_BATT_CHARGE 88
++#define GPIO104_SGH_WIFI_CMD 104
++#define GPIO105_SGH_CARD_DETECT 105
++
++#define GPIO18_SGH_I780_WIFI_CMD 11
++#define GPIO19_SGH_I780_SPK_AUDIO 19
++#define GPIO48_SGH_I780_LED_BACKLIGHT 48
++#define GPIO75_SGH_I780_LED_RED 75
++#define GPIO94_SGH_I780_WIFI_POWER 94
++
++#define GPIO03_SGH_I900_WIFI_POWER 3
++#define GPIO17_SGH_I900_SPK_AUDIO 17
++#define GPIO48_SGH_I900_LED_RED 48
++#define GPIO76_SGH_I900_BT_POWER 76
++#define GPIO118_SGH_I900_WIFI_CMD 118
++
++#define GPIO16_SGH_SPI_CHIP_SEL 16
++
++#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
++static struct gpio_led sgh_leds[] = {
++ [0] = {
++ .name = "red",
++ },
++ [1] = {
++ .name = "green",
++ .default_trigger = "mmc0",
++ .gpio = GPIO09_SGH_LED_GREEN,
++ },
++ [2] = {
++ .name = "blue",
++ .default_trigger = "mmc0",
++ .gpio = GPIO71_SGH_LED_BLUE,
++ },
++ [3] = {
++ .name = "keyboard",
++ },
++};
++
++static struct gpio_led_platform_data sgh_leds_info = {
++ .leds = sgh_leds,
++ .num_leds = ARRAY_SIZE(sgh_leds),
++};
++
++static struct platform_device sgh_device_leds = {
++ .name = "leds-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &sgh_leds_info,
++ }
++};
++
++static struct timed_gpio sgh_vibrator = {
++ .name = "vibrator",
++ .gpio = GPIO79_SGH_LED_VIBRATE,
++ .max_timeout = 1000,
++};
++
++static struct timed_gpio_platform_data sgh_vibrator_info = {
++ .gpios = &sgh_vibrator,
++ .num_gpios = 1,
++};
++
++static struct platform_device sgh_device_vibrator = {
++ .name = "timed-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &sgh_vibrator_info,
++ }
++};
++
++static void __init sgh_init_leds(void)
++{
++
++ sgh_leds[0].gpio = (machine_is_sgh_i780()) ? GPIO75_SGH_I780_LED_RED : GPIO48_SGH_I900_LED_RED;
++ if(machine_is_sgh_i780())
++ sgh_leds[3].gpio = GPIO48_SGH_I780_LED_BACKLIGHT;
++
++ platform_device_register(&sgh_device_leds);
++	//timed_gpio doesn't request the gpio itself
++ gpio_request(GPIO79_SGH_LED_VIBRATE, "SGH-VIBRATOR");
++ platform_device_register(&sgh_device_vibrator);
++
++}
++#else
++static inline void sgh_init_leds(void) {}
++#endif
++
++#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
++static struct platform_pwm_backlight_data sgh_backlight_data = {
++ .pwm_id = 3,
++ .max_brightness = 100,
++ .dft_brightness = 100,
++ .pwm_period_ns = 10000,
++};
++
++static struct platform_device sgh_backlight_device = {
++ .name = "backlight",
++ .dev = {
++ .parent = &pxa27x_device_pwm1.dev,
++ .platform_data = &sgh_backlight_data,
++ },
++};
++/* Pixclock Calculation
++ Calculated from reviewing HaRET source: http://xanadux.cvs.sourceforge.net/viewvc/xanadux/haret/haret-gnu/src/script.cpp?view=markup
++ pixclock = K * 8MHz / CLK ; where CLK is 312MHz and K is last 8 bits of lccr3
++
++ New: pixclock = (K * 200000000) / 15600
++*/
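++/* Worked through with the formula above:
++   i780: K = 19 -> 19 * 200000000 / 15600 ~= 243590 (243600 used below)
++   i900: K = 20 -> 20 * 200000000 / 15600 ~= 256410 (256500 used below) */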
++static struct pxafb_mode_info sgh_i780_mode = {
++ .pixclock = 243600, // K = 19
++ .xres = 320, // HACK: Android does not like square resolutions
++ .yres = 319,
++ .bpp = 16,
++ .hsync_len = 16,
++ .left_margin = 24,
++ .right_margin = 24,
++ .vsync_len = 2,
++ .upper_margin = 3,
++ .lower_margin = 0,
++ .sync = 0,
++};
++static struct pxafb_mode_info sgh_i900_mode = {
++ .pixclock = 256500, // K = 20
++ .xres = 240,
++ .yres = 400,
++ .bpp = 16,
++ .hsync_len = 8,
++ .left_margin = 8,
++ .right_margin = 8,
++ .vsync_len = 4,
++ .upper_margin = 38,
++ .lower_margin = 38,
++ .sync = 0, //FB_SYNC_VERT_HIGH_ACT,
++};
++
++static struct pxafb_mach_info sgh_lcd_info = {
++ .num_modes = 1,
++ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
++};
++
++static void __init sgh_init_lcd(void)
++{
++ platform_device_register(&sgh_backlight_device);
++ sgh_lcd_info.modes = (machine_is_sgh_i780()) ? &sgh_i780_mode : &sgh_i900_mode;
++ set_pxa_fb_info(&sgh_lcd_info);
++}
++#else
++static inline void sgh_init_lcd(void) {}
++#endif
++
++/****************************
++* Keypad *
++****************************/
++
++/*Android (i.e. non-linux) keys:
++Name: defined as: function:
++KEY_SEND 231 Send key
++KEY_END 107 End key
++KEY_BACK 158 Go back a page
++KEY_MENU 139 Open a special menu
++KEY_HOME 102 Return to the home screen
++KEY_SEARCH 217 Open the Android search
++KEY_VOLUMEUP 115 Increase volume
++KEY_VOLUMEDOWN 114 Decrease volume
++KEY_CAMERA 212 Opens camera
++KEY_CAMERAFOCUS 211 Focuses camera (Omnia only, replaces KEY_HP in kernel/include/linux/input.h)
++*/
++
++#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
++/* KEY(row, col, key_code) */
++static unsigned int sgh_i780_matrix_key_map[] = {
++/* QWERTY Keyboard */
++/* 1st row */
++KEY(0, 0, KEY_Q), KEY(7, 1, KEY_W), KEY(2, 0, KEY_E), KEY(3, 0, KEY_R), KEY(4, 0, KEY_T),
++KEY(0, 4, KEY_Y), KEY(1, 4, KEY_U), KEY(2, 4, KEY_I), KEY(3, 4, KEY_O), KEY(4, 4, KEY_P),
++/* 2nd row */
++KEY(0, 1, KEY_A), KEY(7, 2, KEY_S), KEY(2, 1, KEY_D), KEY(3, 1, KEY_F), KEY(4, 1, KEY_G),
++KEY(0, 5, KEY_H), KEY(1, 5, KEY_J), KEY(2, 5, KEY_K), KEY(3, 5, KEY_L), KEY(4, 5, KEY_BACKSPACE),
++/* 3rd row */
++KEY(0, 2, KEY_LEFTALT), KEY(1, 2, KEY_Z), KEY(2, 2, KEY_X), KEY(3, 2, KEY_C), KEY(4, 2, KEY_V),
++KEY(0, 6, KEY_B), KEY(1, 6, KEY_N), KEY(2, 6, KEY_M), KEY(3, 6, KEY_DOT), KEY(4, 6, KEY_ENTER),
++/* 4th row */
++KEY(0, 3, KEY_LEFTSHIFT), KEY(1, 3, KEY_RIGHTALT), KEY(2, 3, KEY_0), KEY(3, 3, KEY_SPACE),
++KEY(4, 3, KEY_COMMA), KEY(7, 6, KEY_SLASH), /* Message */ KEY(5, 1, KEY_TAB), /* GPS */
++
++/* Volume Keys */
++KEY(1, 0, KEY_VOLUMEUP),
++KEY(1, 1, KEY_VOLUMEDOWN),
++
++/* Left Softkey */ /* Windows Key */ /* OK */ /* Right Softkey */
++KEY(5, 4, KEY_MINUS), KEY(5, 2, KEY_MENU), KEY(5, 3, KEY_EXIT), KEY(5, 6, KEY_F2),
++KEY(5, 5, KEY_SEND), KEY(6, 4, KEY_REPLY), KEY(7, 0, KEY_END),
++/* Green Key */ /* Center */ /* Red Key */
++
++/* Camera */
++KEY(7, 3, KEY_CAMERA),
++};
++
++static unsigned int sgh_i900_matrix_key_map[] = {
++ /* KEY(row, col, key_code) */
++ KEY(0, 0, KEY_CAMERAFOCUS), //Camera half-press
++ KEY(0, 1, KEY_CAMERA), //Camera full-press
++ KEY(0, 2, KEY_ENTER), //Center optical dpad button
++ KEY(1, 0, KEY_VOLUMEUP), //Volume up
++ KEY(1, 1, KEY_VOLUMEDOWN), //Volume down
++ KEY(1, 2, KEY_SEND), //Send key
++ KEY(2, 0, KEY_MENU), //Top right key (Main Menu button)
++ KEY(2, 1, KEY_END), //???
++ KEY(2, 2, KEY_BACK), //End key (Back button)
++
++};
++
++static struct pxa27x_keypad_platform_data sgh_keypad_info = {
++ .enable_rotary0 = 0,
++
++ .debounce_interval = 30,
++};
++
++static void __init sgh_init_keypad(void)
++{
++ if(machine_is_sgh_i780())
++ {
++ sgh_keypad_info.matrix_key_rows = 8;
++ sgh_keypad_info.matrix_key_cols = 7;
++ sgh_keypad_info.matrix_key_map = sgh_i780_matrix_key_map;
++ sgh_keypad_info.matrix_key_map_size = ARRAY_SIZE(sgh_i780_matrix_key_map);
++ }
++ else
++ {
++ sgh_keypad_info.matrix_key_rows = 3;
++ sgh_keypad_info.matrix_key_cols = 3;
++ sgh_keypad_info.matrix_key_map = sgh_i900_matrix_key_map;
++ sgh_keypad_info.matrix_key_map_size = ARRAY_SIZE(sgh_i900_matrix_key_map);
++ }
++
++ pxa_set_keypad_info(&sgh_keypad_info);
++}
++#else
++static inline void sgh_init_keypad(void) {}
++#endif
++
++#if defined(CONFIG_MMC)
++static int sgh_mci_sdcard_init(struct device *dev,
++ irq_handler_t sgh_detect_int,
++ void *data)
++{
++ int err, cd_irq;
++ int gpio_cd = GPIO105_SGH_CARD_DETECT;
++
++ cd_irq = gpio_to_irq(gpio_cd);
++
++ /*
++ * setup GPIO for MMC controller
++ */
++ err = gpio_request(gpio_cd, "microSD card detect");
++ if (err)
++ goto err_request_cd;
++ gpio_direction_input(gpio_cd);
++
++ err = request_irq(cd_irq, sgh_detect_int,
++ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
++ "microSD card detect", data);
++ if (err) {
++ printk(KERN_ERR "%s: MicroSD: "
++ "can't request card detect IRQ\n", __func__);
++ goto err_request_cd;
++ }
++
++ return 0;
++
++err_request_cd:
++ return err;
++}
++
++static void sgh_mci_sdcard_exit(struct device *dev, void *data)
++{
++ int cd_irq, gpio_cd;
++
++ cd_irq = gpio_to_irq(105);
++ gpio_cd = 105;
++
++ free_irq(cd_irq, data);
++ gpio_free(gpio_cd);
++}
++
++static struct pxamci_platform_data sgh_mci_sdcard_platform_data = {
++ .detect_delay = 20,
++ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
++ .init = sgh_mci_sdcard_init,
++ .exit = sgh_mci_sdcard_exit,
++ .gpio_card_detect = -1,
++ .gpio_card_ro = -1,
++ .gpio_power = -1,
++};
++
++
++static void __init sgh_init_mmc(void)
++{
++ pxa_set_mci_info(&sgh_mci_sdcard_platform_data); // External MicroSD
++ if(machine_is_sgh_i900())
++ pxa3xx_set_mci2_info(&sgh_mci_sdcard_platform_data); // Internal MicroSD
++}
++#else
++static inline void sgh_init_mmc(void) {}
++#endif
++static void sgh_udc_command(int cmd)
++{
++ switch (cmd) {
++ case PXA2XX_UDC_CMD_CONNECT:
++ //UP2OCR |= UP2OCR_HXOE | UP2OCR_DPPUE | UP2OCR_DPPUBE;
++ UP2OCR |= 0xf024; // USB Port 2 Output Control Register
++ break;
++ case PXA2XX_UDC_CMD_DISCONNECT:
++ //UP2OCR &= ~(UP2OCR_HXOE | UP2OCR_DPPUE | UP2OCR_DPPUBE);
++		UP2OCR &= ~0xf024;	/* clear the bits that were set on connect */
++ break;
++ }
++}
++static struct pxa2xx_udc_mach_info sgh_udc_info __initdata = {
++ .udc_command = sgh_udc_command,
++};
++ /* WinMo: UHCHR_SSEP2 | UHCHR_SSEP1 | UHCHR_SSE | UHCHR_CGR | UHCHR_FHR
++ Set the Power Control Polarity Low */
++/* UHCHR = (UHCHR | UHCHR_PCPL) &
++ ~(UHCHR_SSEP1 | UHCHR_SSEP2 | UHCHR_SSE);
++*/
++static int sgh_init_udc(void)
++{
++ pxa_set_udc_info(&sgh_udc_info);
++ return 0;
++}
++
++#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
++static int sgh_ohci_init(struct device *dev)
++{
++ return 0;
++}
++static struct pxaohci_platform_data sgh_ohci_platform_data = {
++ .port_mode = PMM_PERPORT_MODE,
++ .init = sgh_ohci_init
++};
++
++static void __init sgh_init_ohci(void)
++{
++ pxa_set_ohci_info(&sgh_ohci_platform_data);
++}
++#else
++static inline void sgh_init_ohci(void) {}
++#endif /* CONFIG_USB_OHCI_HCD || CONFIG_USB_OHCI_HCD_MODULE */
++
++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
++static struct i2c_board_info __initdata sgh_i2c_board_info[] = {
++ { /* PM6558 Battery */
++ .type = "sgh_battery",
++ .addr = SGH_BATT_I2C_SLAVE_ADDRESS,
++ },
++};
++static void __init sgh_init_i2c(void)
++{
++ i2c_register_board_info(0, sgh_i2c_board_info,
++ ARRAY_SIZE(sgh_i2c_board_info));
++ pxa_set_i2c_info(NULL);
++}
++#else
++static inline void sgh_init_i2c(void) {}
++#endif
++
++#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MASTER)
++static void sgh_spi_wifi_cs(u32 command)
++{
++ gpio_set_value(GPIO16_SGH_SPI_CHIP_SEL, !(command == PXA2XX_CS_ASSERT));
++}
++
++static int sgh_libertas_setup(struct spi_device *spi)
++{
++ int WifiPwr = 0;
++ int WifiCmd = 0;
++ if(machine_is_sgh_i780())
++ {
++ WifiPwr = GPIO94_SGH_I780_WIFI_POWER;
++ WifiCmd = GPIO18_SGH_I780_WIFI_CMD;
++ }
++ else if(machine_is_sgh_i900())
++ {
++ WifiPwr = GPIO03_SGH_I900_WIFI_POWER;
++ WifiCmd = GPIO118_SGH_I900_WIFI_CMD;
++ }
++ gpio_request(WifiPwr,"WLAN");
++ gpio_request(0x10,"WLAN");
++ gpio_request(0x68,"WLAN");
++ gpio_request(WifiCmd,"WLAN");
++
++ //pxa_init_hw
++ gpio_direction_output(0x68,1);
++ gpio_direction_output(WifiCmd,1);
++ gpio_direction_output(WifiPwr,1);
++ gpio_direction_output(0x10,1);
++ mdelay(60);
++
++ gpio_set_value(WifiPwr,1);
++ mdelay(60);
++ gpio_set_value(WifiCmd,1);
++ gpio_set_value(0x10,1);
++ gpio_set_value(0x68,1);
++ mdelay(60);
++
++
++ //gspx_power_up
++ gpio_set_value(WifiPwr,1);
++ mdelay(60);
++ gpio_set_value(WifiCmd,1);
++ gpio_set_value(0x68,1);
++ mdelay(150);
++
++ //gspx_reset_module
++ gpio_set_value(0x68,1);
++ mdelay(60);
++ gpio_set_value(0x68,0);
++ mdelay(60);
++ gpio_set_value(0x68,1);
++ mdelay(100);
++
++ spi->bits_per_word = 16;
++ spi_setup(spi);
++
++ return 0;
++}
++
++static struct pxa2xx_spi_chip sgh_wifi_chip = {
++ .rx_threshold = 8,
++ .tx_threshold = 8,
++ .timeout = 235,
++ .dma_burst_size = 16,
++ .cs_control = sgh_spi_wifi_cs,
++};
++
++static struct pxa2xx_spi_master sgh_spi_info = {
++ .clock_enable = CKEN_SSP1,
++ .num_chipselect = 1,
++ .enable_dma = 1,
++};
++
++struct libertas_spi_platform_data sgh_wifi_pdata = {
++ .use_dummy_writes = 0,
++ .setup = sgh_libertas_setup,
++};
++
++static struct spi_board_info sgh_spi_devices[] __initdata = {
++ { //wireless
++ .modalias = "libertas_spi",
++ .max_speed_hz = 13000000,
++ .bus_num = 1,
++ .irq = IRQ_GPIO(8),
++ .chip_select = 0,
++ .controller_data = &sgh_wifi_chip,
++ .platform_data = &sgh_wifi_pdata,
++ },
++};
++
++static void __init sgh_init_spi(void)
++{
++ sgh_spi_devices[0].irq = IRQ_GPIO(machine_is_sgh_i780() ? 11 : 8);
++ pxa2xx_set_spi_info(1, &sgh_spi_info);
++ spi_register_board_info(ARRAY_AND_SIZE(sgh_spi_devices));
++}
++#else
++static inline void sgh_init_spi(void){}
++#endif
++
++#if defined(CONFIG_PXA_DVFM)
++struct pxa3xx_freq_mach_info sgh_freq_mach_info = {
++ .flags = 0,
++};
++
++static void __init sgh_init_dvfm() {
++ set_pxa3xx_freq_info(&sgh_freq_mach_info);
++ pxa3xx_set_pmu_info(NULL);
++}
++#else
++static inline void sgh_init_dvfm(void){}
++#endif
++
++static mfp_cfg_t sgh_mfp_cfg[] __initdata = {
++ /* AC97 */
++ //GPIO23_AC97_nACRESET,
++ GPIO25_AC97_SDATA_IN_0,
++ GPIO27_AC97_SDATA_OUT,
++ GPIO28_AC97_SYNC,
++ GPIO29_AC97_BITCLK,
++
++ /* KEYPAD */
++ GPIO115_KP_MKIN_0 | MFP_LPM_EDGE_BOTH,
++ GPIO116_KP_MKIN_1 | MFP_LPM_EDGE_BOTH,
++ GPIO117_KP_MKIN_2 | MFP_LPM_EDGE_BOTH,
++ GPIO121_KP_MKOUT_0,
++ GPIO122_KP_MKOUT_1,
++ GPIO123_KP_MKOUT_2,
++ GPIO124_KP_MKOUT_3,
++
++};
++
++static struct platform_device sgh_audio = {
++ .name = "sgh-asoc",
++ .id = -1,
++};
++
++static struct platform_device *devices[] __initdata = {
++ &sgh_audio,
++};
++
++
++static void __init sgh_init(void)
++{
++ static int dvfm = 0;
++
++ pxa3xx_mfp_config(ARRAY_AND_SIZE(sgh_mfp_cfg));
++ sgh_init_dvfm();
++
++ rpc_init();
++
++ sgh_init_lcd();
++ sgh_init_mmc();
++ sgh_init_leds();
++ sgh_init_keypad();
++
++ pxa_set_ac97_info(NULL);
++ platform_add_devices(devices, ARRAY_SIZE(devices));
++
++ sgh_init_ohci();
++ sgh_init_udc();
++ sgh_init_i2c();
++ sgh_init_spi();
++ /*
++ dvfm_register("Test", &dvfm);
++ dvfm_disable_op_name("D1", dvfm);
++ dvfm_disable_op_name("D2", dvfm);
++ */
++}
++
++MACHINE_START(SGH_I780, "Samsung SGH-i780 (Mirage) phone")
++ .phys_io = 0x40000000,
++ .boot_params = 0xa0000100,
++ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
++ .map_io = pxa_map_io,
++ .init_irq = pxa3xx_init_irq,
++ .timer = &pxa_timer,
++ .init_machine = sgh_init,
++MACHINE_END
++
++MACHINE_START(SGH_I900, "Samsung SGH-i900 (Omnia) phone")
++ .phys_io = 0x40000000,
++ .boot_params = 0xa0000100,
++ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
++ .map_io = pxa_map_io,
++ .init_irq = pxa3xx_init_irq,
++ .timer = &pxa_timer,
++ .init_machine = sgh_init,
++MACHINE_END
+diff -ur linux-2.6.32/arch/arm/mach-pxa/sgh_rpc.c kernel/arch/arm/mach-pxa/sgh_rpc.c
+--- linux-2.6.32/arch/arm/mach-pxa/sgh_rpc.c 2009-12-13 13:00:59.168618858 +0200
++++ kernel/arch/arm/mach-pxa/sgh_rpc.c 2009-12-12 16:09:26.486282481 +0200
+@@ -0,0 +1,333 @@
++/**
++ * Samsung SGH I900 RPC Driver for MSM6K
++ *
++ * Copyright (C) 2009 Mustafa Ozsakalli <ozsakalli@hotmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/miscdevice.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/delay.h>
++#include <linux/workqueue.h>
++#include <linux/completion.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++
++#include <asm/uaccess.h>
++
++#include <mach/hardware.h>
++#include <mach/sgh_msm6k.h>
++
++
++#include "devices.h"
++
++static DEFINE_SPINLOCK(msgs_lock);
++static DEFINE_SPINLOCK(crc_lock);
++
++static void do_read_data(struct work_struct *work);
++static DECLARE_WORK(work_read, do_read_data);
++static struct workqueue_struct *workqueue;
++
++struct class *sgh_rpc_class;
++dev_t sgh_rpc_devno;
++
++static struct cdev rpc_cdev;
++static struct device *rpc_device;
++
++struct __rpc_msg {
++ int command;
++ int type;
++ int index;
++ int len;
++ int crc;
++ char *data;
++
++ struct __rpc_msg *next;
++};
++
++static struct __rpc_msg *msgs_head = NULL;
++static struct __rpc_msg *msgs_tail = NULL;
++
++static void *rpc_malloc(unsigned sz) {
++ void *ptr = kmalloc(sz, GFP_KERNEL);
++
++ if(ptr)
++ return ptr;
++
++ printk(KERN_ERR "sgh_rpc: kmalloc of %d failed, retrying...\n", sz);
++
++ do {
++ ptr = kmalloc(sz, GFP_KERNEL);
++ } while (!ptr);
++
++ return ptr;
++}
++
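++/*
++ * RPC frame layout, as inferred from the parsing in do_read_data() below and
++ * the mirror encoding in rpc_write() (byte offsets within the frame):
++ *
++ *   [0]     0x7f start marker
++ *   [1..2]  packet length = payload length + 10, little-endian
++ *   [3]     crc byte
++ *   [4..5]  rpc length = payload length + 7, little-endian
++ *   [6..7]  index
++ *   [8..9]  command, high byte first
++ *   [10]    type
++ *   [11..]  payload, followed by a single 0x7e end marker
++ */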
++static void do_read_data(struct work_struct *work) {
++ struct __rpc_msg *msg;
++ int pktlen, rpclen;
++ unsigned char end_flag;
++ char buf[11];
++ unsigned long flags=0;
++
++ msg = (struct __rpc_msg *)rpc_malloc(sizeof(struct __rpc_msg));
++ msg->data = NULL;
++
++ if(smd_read(CH_RPC, buf,11) == 0) {
++ if(buf[0] != 0x7f) {
++ goto cleanup;
++ }
++
++ pktlen = (buf[2]<<8)|(buf[1]);
++ msg->crc = buf[3];
++ rpclen = (buf[5]<<8)|(buf[4]);
++ msg->len = rpclen - 7;
++ msg->index = (buf[7]<<8)|(buf[6]);
++ msg->command = (buf[8]<<8)|(buf[9]);
++ msg->type = buf[10];
++
++ if(msg->len > 0) {
++ msg->data = (char *)rpc_malloc(msg->len);
++ if(smd_read(CH_RPC, msg->data, msg->len) != 0) {
++ goto cleanup;
++ }
++ }
++
++ if(smd_read(CH_RPC, &end_flag,1)!=0 || end_flag!=0x7e){
++ goto cleanup;
++ }
++
++ spin_lock_irqsave(&msgs_lock, flags);
++ if(msgs_tail != NULL)
++ msgs_tail->next = msg;
++ msgs_tail = msg;
++ msgs_tail->next = NULL;
++ if(msgs_head == NULL)
++ msgs_head = msg;
++ spin_unlock_irqrestore(&msgs_lock, flags);
++
++ goto success;
++
++ }
++
++cleanup:
++ if(msg->data)
++ kfree(msg->data);
++
++ kfree(msg);
++
++success:
++ queue_work(workqueue, &work_read);
++}
++
++static int write_index = 0xff00;
++
++static char __crc;
++
++static char calc_crc() {
++ int64_t m;
++ int u, rc;
++
++ m = (__crc+1) * -2130574327; //0x81020409
++ u = m>>32;
++ u += __crc+1;
++ u >>= 6;
++
++ u += ((unsigned)u>>31);
++ u += ((unsigned)u<<7);
++ u = __crc+1 - u;
++
++ rc = __crc;
++ __crc = u;
++
++ return rc;
++}
++
++static int rpc_write(unsigned cmd, unsigned type,void *data, unsigned len) {
++ char *pkt;
++ char *p;
++ int crc;
++ unsigned long flags=0;
++ unsigned n;
++
++ pkt = rpc_malloc(len+11);
++ p = pkt;
++
++ n = len + 10;
++
++ *p++ = 0x7f;
++ *p++ = n & 0xff;
++ *p++ = (n>>8) & 0xff;
++ spin_lock_irqsave(&crc_lock, flags);
++ crc = calc_crc();
++ spin_unlock_irqrestore(&crc_lock, flags);
++ *p++ = crc;
++
++ n = len + 7;
++ *p++ = (n) & 0xff;
++ *p++ = (n>>8) & 0xff;
++ *p++ = write_index++ & 0xff; //index
++ *p++ = 0xff;
++ *p++ = (cmd>>8) & 0xff;
++ *p++ = cmd & 0xff;
++ *p++ = type & 0xff;
++
++ if(len > 0 && data != NULL) {
++ copy_from_user(p, data, len);
++ p += len;
++ }
++
++ *p++ = 0x7e;
++
++ smd_write(CH_RPC, pkt, (unsigned)(p - pkt));
++
++ kfree(pkt);
++
++ return len;
++}
++
++
++static int rpc_ops_open(struct inode *inode, struct file *filp) {
++ int rc;
++
++ rc = nonseekable_open(inode, filp);
++ if (rc < 0)
++ return rc;
++
++ return 0;
++}
++
++static int rpc_ops_release(struct inode *inode, struct file *filp) {
++ return 0;
++}
++
++static ssize_t rpc_ops_read(struct file *filp, char __user *buf,size_t count, loff_t *ppos) {
++ unsigned long flags = 0;
++ struct __rpc_msg *msg = NULL;
++ int len = 0;
++
++
++ msg = msgs_head;
++ if(msg == NULL) return -EIO;
++
++ spin_lock_irqsave(&msgs_lock, flags);
++ msgs_head = msgs_head->next;
++ if(msgs_head == NULL)
++ msgs_tail = NULL;
++ spin_unlock_irqrestore(&msgs_lock, flags);
++
++ if(msg->data != NULL && msg->len > 0) {
++ len = count > msg->len ? msg->len : count;
++ if(copy_to_user(buf, msg->data, len)!=0)
++ len = -EIO;
++ }
++ if(msg->data != NULL)
++ kfree(msg->data);
++ kfree(msg);
++
++ return len;
++}
++
++static ssize_t rpc_ops_write(struct file *filp, const char __user *buf,size_t count, loff_t *ppos) {
++ char h[6];
++ short *sh;
++
++ if(copy_from_user(h, buf, 6))
++ return 0;
++
++ buf += 6;
++
++ sh = (short *)h;
++
++ return rpc_write(sh[0], sh[1], sh[2]>0 ? buf : NULL, sh[2]);
++}
++
++static unsigned int rpc_ops_poll(struct file *filp, struct poll_table_struct *wait) {
++ unsigned mask = 0;
++
++ return mask;
++}
++
++static long rpc_ops_ioctl(struct file *filp, unsigned int cmd,unsigned long arg) {
++ struct __rpc_msg *msg;
++
++ msg = msgs_head;
++ if(msg == NULL)
++ return -EIO;
++
++ return copy_to_user((void *)arg, msg, 20);
++}
++
++static struct file_operations rpc_fops = {
++ .owner = THIS_MODULE,
++ .open = rpc_ops_open,
++ .release = rpc_ops_release,
++ .read = rpc_ops_read,
++ .write = rpc_ops_write,
++ //.poll = rpc_ops_poll,
++ .unlocked_ioctl = rpc_ops_ioctl,
++
++};
++
++
++void rpc_init(void) {
++ int rc;
++ int major;
++
++ smd_init();
++
++ /* Create the device nodes */
++ sgh_rpc_class = class_create(THIS_MODULE, "sghrpc");
++ if (IS_ERR(sgh_rpc_class)) {
++ rc = -ENOMEM;
++ printk(KERN_ERR
++ "sgh_rpc: failed to create sghrpc class\n");
++ return;
++ }
++
++ rc = alloc_chrdev_region(&sgh_rpc_devno, 0, 1, "sghrpc");
++ if (rc < 0) {
++ printk(KERN_ERR
++ "rpcrouter: Failed to alloc chardev region (%d)\n", rc);
++ goto fail_destroy_class;
++ }
++
++ major = MAJOR(sgh_rpc_devno);
++ rpc_device = device_create(sgh_rpc_class, NULL,
++ sgh_rpc_devno, NULL, "sghrpc%d:%d",
++ 0, 0);
++ if (IS_ERR(rpc_device)) {
++ rc = -ENOMEM;
++ goto fail_unregister_cdev_region;
++ }
++
++ cdev_init(&rpc_cdev, &rpc_fops);
++ rpc_cdev.owner = THIS_MODULE;
++
++ rc = cdev_add(&rpc_cdev, sgh_rpc_devno, 1);
++ if (rc < 0)
++ goto fail_destroy_device;
++
++ workqueue = create_singlethread_workqueue("sgh-rpc");
++ queue_work(workqueue, &work_read);
++
++ return;
++
++fail_destroy_device:
++ device_destroy(sgh_rpc_class, sgh_rpc_devno);
++fail_unregister_cdev_region:
++ unregister_chrdev_region(sgh_rpc_devno, 1);
++fail_destroy_class:
++ class_destroy(sgh_rpc_class);
++}
+diff -ur linux-2.6.32/arch/arm/mach-pxa/sgh_smd.c kernel/arch/arm/mach-pxa/sgh_smd.c
+--- linux-2.6.32/arch/arm/mach-pxa/sgh_smd.c 2009-12-13 13:01:03.799036125 +0200
++++ kernel/arch/arm/mach-pxa/sgh_smd.c 2009-12-12 16:09:26.486282481 +0200
+@@ -0,0 +1,458 @@
++/**
++ * Support for Samsung SGH I900 MSM6K Shared Memory
++ *
++ * Copyright (C) 2009 Mustafa Ozsakalli <ozsakalli@hotmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/platform_device.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/miscdevice.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/delay.h>
++#include <linux/freezer.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++#include <linux/completion.h>
++
++#include <mach/hardware.h>
++#include <mach/sgh_msm6k.h>
++
++#include "devices.h"
++
++static unsigned mmio;
++static int smd_initialized;
++
++static DEFINE_SPINLOCK(smd_lock);
++
++#define MEMP(x) (void *)(mmio + x)
++#define MEMW(x) *((unsigned short *)(mmio + x))
++#define MEML(x) *((unsigned long *)(mmio + x))
++
++#define HEAD(c) MEMW(c.head)
++#define TAIL(c) MEMW(c.tail)
++#define HEADPTR(c) MEMP(c.base + HEAD(c))
++#define TAILPTR(c) MEMP(c.base + TAIL(c))
++#define SETTAIL(c,t) TAIL(c)=t; TAIL(c)=t; TAIL(c)=t
++#define SETHEAD(c,h) HEAD(c)=h; HEAD(c)=h; HEAD(c)=h
++#define AVAIL(h,t,s) ((t) <= (h) ? (h) - (t) : (s) - ((t) - (h)))
++
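++/*
++ * Layout of the 0x4000-byte dual-port RAM shared with the MSM6K, as used by
++ * the channel table below: each channel has a send and a receive ring with
++ * 16-bit head/tail indices at fixed word offsets, followed by the ring data.
++ * Offset 0x3ffc holds the mask the modem writes (read in smd_get_mask()) and
++ * 0x3ffe the mask we write to signal it (smd_set_mask()); a write of 0 to
++ * offset 0x20 after each mask access is presumably the interrupt doorbell or
++ * acknowledge.
++ */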
++struct smd_half_channel {
++ unsigned head;
++ unsigned tail;
++ unsigned base;
++ unsigned size;
++};
++
++struct smd_channel {
++ struct smd_half_channel send;
++ struct smd_half_channel recv;
++ unsigned head_mask;
++ unsigned tail_mask;
++ unsigned data_mask;
++ unsigned ex_mask;
++ wait_queue_head_t wait_recv;
++ wait_queue_head_t wait_send;
++};
++
++static struct smd_channel smd_channels[] = {
++ //msm6k rpc channel
++ {
++
++ .send = {
++ .head = 0x4,
++ .tail = 0x6,
++ .base = 0x8,
++ .size = 0x3fc,
++ },
++
++ .recv = {
++ .head = 0x1298,
++ .tail = 0x129a,
++ .base = 0x129c,
++ .size = 0x3fc,
++ },
++
++ .head_mask = 0x2,
++ .tail_mask = 0x8,
++ .data_mask = 0x20,
++
++ },
++
++ {
++
++ .send = {
++ .head = 0x404,
++ .tail = 0x406,
++ .base = 0x408,
++ .size = 0xe90,
++ },
++
++ .recv = {
++ .head = 0x1698,
++ .tail = 0x169a,
++ .base = 0x169c,
++ .size = 0x2950,
++ },
++
++ .head_mask = 0x1,
++ .tail_mask = 0x4,
++ .data_mask = 0x10,
++
++ },
++
++};
++
++void smd_phone_power(int on) {
++ if(on){
++ gpio_set_value(0x66,1);
++ gpio_set_value(0x51,1);
++ mdelay(500);
++ gpio_set_value(0x51,0);
++ } else {
++ gpio_set_value(0x66,0);
++ mdelay(500);
++ gpio_set_value(0x66,1);
++
++ }
++}
++
++
++void smd_init_mem(void)
++{
++ int i;
++
++ if(smd_initialized)
++ return;
++
++ MEML(0x20) = 0;
++ MEMW(0x3ffe) = 0x00C1;
++ MEML(0x20) = 0;
++
++ MEMW(0x2) = 0;
++ MEMW(0x4) = 0;
++ MEMW(0x6) = 0;
++
++ for(i = 8; i < 0x404; i += 2)
++ MEMW(i) = 0x1111;
++
++ MEMW(0x404) = 0;
++ MEMW(0x406) = 0;
++
++ for(i = 0x408; i < 0x1298; i += 2)
++ MEMW(i) = 0x2222;
++
++ MEMW(0x1298) = 0;
++ MEMW(0x129A) = 0;
++
++ for(i = 0x129C; i < 0x1698; i += 2)
++ MEMW(i) = 0x3333;
++
++ MEMW(0x1698) = 0;
++ MEMW(0x169A) = 0;
++
++ for(i = 0x169C; i < 0x3FEC; i += 2)
++ MEMW(i) = 0x4444;
++
++ if(MEML(0x18) == 0) {
++ MEMW(0) = 0x00AA;
++ MEMW(2) = 0x0001;
++ }
++
++ MEMW(0x3ffe) = 0x00C2;
++ MEML(0x20) = 0;
++
++ smd_initialized = 1;
++
++	printk("SMD: initialization completed\n");
++}
++
++static int smd_write_and_check(unsigned adr, void* data, int len) {
++ int try;
++
++ for(try=0; try<3; try++){
++ memcpy(MEMP(adr), data, len);
++ if(memcmp(MEMP(adr), data, len)==0) break;
++ }
++
++ if(adr == 0x3FFE)
++ MEML(0x20) = 0;
++
++ return try<3 ? 1 : 0;
++
++}
++
++static int smd_read_and_check(unsigned adr, void *data, int len) {
++ int try;
++
++
++ for(try=0;try<3;try++){
++ memcpy(data,MEMP(adr),len);
++ if(memcmp(data,MEMP(adr),len)==0) break;
++ }
++
++ if(try > 2 || adr == 0x3ffc)
++ MEML(0x20) = 0;
++
++ //synch problem
++ if(try>2) return 0;
++
++ return 1;
++
++}
++
++static int smd_get_mask() {
++ unsigned short mask;
++
++ smd_read_and_check(0x3ffc, &mask, 2);
++
++ return mask;
++}
++
++static void smd_set_mask(short mask) {
++ smd_write_and_check(0x3ffe, &mask, 2);
++}
++
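++/*
++ * The modem signals events through the mask word: bit 0x80 marks a valid
++ * notification, value 0x48 (ignoring the valid bit) asks us to reinitialize
++ * the shared memory and 0x4A reports deep sleep.  The per-channel
++ * head/tail/data mask bits are used to wake readers when new data arrives
++ * and writers once the modem has drained its ring; this interpretation is
++ * inferred from smd_irq_handler() and ch_read() below.
++ */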
++irqreturn_t smd_irq_handler(int irq, void *dev_id){
++ unsigned long flags;
++ int mask,i;
++
++ //printk("SMD: IRQ fired\n");
++
++ spin_lock_irqsave(&smd_lock, flags);
++
++ mask = smd_get_mask();
++
++ if(!(mask&0x80)) goto done;
++
++ switch(mask & ~0x80){
++ case 0x48 : //initialize
++ smd_init_mem();
++ break;
++
++ case 0x4A :
++ printk("SMD: Phone Deep Sleep??\n");
++ /*fire PhoneDeepSleepEvent?*/
++ break;
++ }
++
++
++ for(i=0;i<2;i++){
++ struct smd_channel *c = &smd_channels[i];
++ if(HEAD(c->recv) != TAIL(c->recv))
++ mask |= c->head_mask;
++ }
++
++ if((mask & 0x2a) != 0)
++ smd_channels[0].ex_mask = mask;
++
++ if((mask & 0x15) != 0)
++ smd_channels[1].ex_mask = mask;
++
++
++ for(i=0;i<2;i++){
++ struct smd_channel *c = &smd_channels[i];
++
++ if(HEAD(c->recv) != TAIL(c->recv)){
++ wake_up(&c->wait_recv);
++ }
++
++ if((mask & (0x80 | c->tail_mask)) != 0)
++ wake_up(&c->wait_send);
++ }
++
++done:
++ spin_unlock_irqrestore(&smd_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++int smd_read_avail(struct smd_channel *c) {
++ unsigned head, tail;
++
++ if(!smd_initialized)
++ return 0;
++
++ head = HEAD(c->recv);
++ tail = TAIL(c->recv);
++
++ return AVAIL(head,tail,c->recv.size);
++}
++
++int ch_read(struct smd_channel *c, void *_buf, int len) {
++ int n;
++ int head, tail;
++ int orig_len = len;
++ unsigned char *buf = _buf;
++
++ if(!smd_initialized) return 0;
++
++
++ while(len > 0) {
++ head = HEAD(c->recv);
++ tail = TAIL(c->recv);
++
++ n = tail<=head ? head - tail : c->recv.size - tail;
++ if(n==0) break;
++
++ if(n > len) n = len;
++
++ memcpy(buf, TAILPTR(c->recv), n);
++
++ buf += n;
++ len -= n;
++
++ tail = (tail + n) % (c->recv.size);
++ SETTAIL(c->recv,tail);
++ }
++
++ if(orig_len!=len || HEAD(c->recv)==TAIL(c->recv)) {
++ int mask = c->ex_mask;
++ mask &= c->data_mask;
++ if(mask != 0)
++ smd_set_mask(0x80 | c->tail_mask);
++ }
++
++ return orig_len - len;
++}
++
++int ch_write(struct smd_channel *c, void *buf, int len) {
++ unsigned head, tail ,mask;
++ int n;
++
++ head = HEAD(c->send);
++ tail = TAIL(c->send);
++
++ n = (head < tail) ? tail - head :
++ c->send.size - head;
++
++
++ mask = 0x80;
++
++ if(n > len) n = len;
++
++ if(n > 0) {
++ memcpy(HEADPTR(c->send), buf, n);
++ head = (head + n) % c->send.size;
++ SETHEAD(c->send, head);
++ mask |= c->head_mask;
++ }
++ head = HEAD(c->send);
++ tail = TAIL(c->send);
++
++ if(n < len)
++ mask |= c->data_mask;
++
++ smd_set_mask(mask);
++
++ return n;
++}
++
++struct smd_channel *smd_get_channel(int c) {
++ return &smd_channels[c];
++}
++
++int smd_read(int ch, void *buf, int len) {
++ struct smd_channel *c;
++ unsigned long flags;
++ int rc;
++
++ c = smd_get_channel(ch);
++ for(;;) {
++ spin_lock_irqsave(&smd_lock, flags);
++ if(smd_read_avail(c) >= len) {
++ rc = ch_read(c, buf, len);
++ spin_unlock_irqrestore(&smd_lock, flags);
++ if(rc == len)
++ return 0;
++ else
++ return -EIO;
++ }
++
++ spin_unlock_irqrestore(&smd_lock, flags);
++ wait_event(c->wait_recv, smd_read_avail(c) >= len);
++ }
++
++ return 0;
++}
++
++int smd_write(int ch, void *_buf, int len) {
++ struct smd_channel *c;
++ unsigned long flags;
++ int n;
++ char *buf = _buf;
++
++ c = smd_get_channel(ch);
++ while(len > 0) {
++ spin_lock_irqsave(&smd_lock, flags);
++ n = ch_write(c, buf, len);
++ spin_unlock_irqrestore(&smd_lock, flags);
++
++ len -= n;
++ buf += n;
++
++ wait_event(c->wait_send, len <= 0);
++ }
++
++ return 0;
++}
++
++
++void smd_init(void) {
++ struct resource *r;
++ unsigned short *ram;
++ int rc, i;
++
++ gpio_request(0x6b,"dpram");
++ gpio_request(0x46,"dpram");
++ gpio_request(0x6e,"dpram");
++ gpio_request(0x51,"dpram");
++ gpio_request(0x66,"dpram");
++
++ gpio_direction_output(0x6b,0);
++ gpio_direction_output(0x46,0);
++ gpio_direction_output(0x6e,0);
++ gpio_direction_output(0x51,1);
++ gpio_direction_output(0x66,1);
++
++ smd_phone_power(0);
++
++ gpio_set_value(0x46,0);
++ gpio_set_value(0x6b,0);
++
++ r = request_mem_region(0,0x4000,"dpram");
++ if(r==NULL){
++ printk("SMD: Can't get memory region!\n");
++ return;
++ }
++
++ mmio = (unsigned long)ioremap(r->start,r->end-r->start+1);
++
++ for(i=0;i<2;i++) {
++ init_waitqueue_head(&smd_channels[i].wait_recv);
++ init_waitqueue_head(&smd_channels[i].wait_send);
++ }
++
++ ram = ((unsigned short *)(mmio));
++ //check dpram
++ for(i=0;i<0x2000;i++)
++ ram[i] = 0;
++
++ ram[0] = 0xaa;
++ ram[1] = 1;
++
++ rc = request_irq(IRQ_GPIO(0x46), smd_irq_handler,
++ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
++ "SMD-17", NULL);
++
++ smd_phone_power(0);
++ smd_phone_power(0);
++}
+diff -ur linux-2.6.32/arch/arm/tools/mach-types kernel/arch/arm/tools/mach-types
+--- linux-2.6.32/arch/arm/tools/mach-types 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/tools/mach-types 2009-12-12 16:09:26.746279722 +0200
+@@ -2249,7 +2249,7 @@
+ darwin MACH_DARWIN DARWIN 2262
+ oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
+ rtsbc20 MACH_RTSBC20 RTSBC20 2264
+-sgh_i780 MACH_I780 I780 2265
++sgh_i780 MACH_SGH_I780 SGH_I780 2265
+ gemini324 MACH_GEMINI324 GEMINI324 2266
+ oratislan MACH_ORATISLAN ORATISLAN 2267
+ oratisalog MACH_ORATISALOG ORATISALOG 2268
+diff -ur linux-2.6.32/drivers/i2c/busses/i2c-pxa.c kernel/drivers/i2c/busses/i2c-pxa.c
+--- linux-2.6.32/drivers/i2c/busses/i2c-pxa.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/i2c/busses/i2c-pxa.c 2009-12-12 16:09:31.776280278 +0200
+@@ -1173,7 +1173,7 @@
+ .owner = THIS_MODULE,
+ .pm = I2C_PXA_DEV_PM_OPS,
+ },
+- .id_table = i2c_pxa_id_table,
++ .id_table = &i2c_pxa_id_table,
+ };
+
+ static int __init i2c_adap_pxa_init(void)
+diff -ur linux-2.6.32/drivers/input/keyboard/pxa27x_keypad.c kernel/drivers/input/keyboard/pxa27x_keypad.c
+--- linux-2.6.32/drivers/input/keyboard/pxa27x_keypad.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/input/keyboard/pxa27x_keypad.c 2009-12-12 16:09:32.012943837 +0200
+@@ -32,6 +32,16 @@
+
+ #include <mach/hardware.h>
+ #include <mach/pxa27x_keypad.h>
++#if defined(CONFIG_PXA3xx_DVFM)
++#include <linux/notifier.h>
++#include <linux/timer.h>
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_dvfm.h>
++#endif
++#ifdef CONFIG_ANDROID_POWER
++#include <linux/android_power.h>
++static android_suspend_lock_t pxa27x_keypad_suspend_lock;
++#endif
+ /*
+ * Keypad Controller registers
+ */
+@@ -98,6 +108,25 @@
+ #define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
+ #define MAX_KEYPAD_KEYS (MAX_MATRIX_KEY_NUM + MAX_DIRECT_KEY_NUM)
+
++#if defined(CONFIG_PXA3xx_DVFM)
++#define D2_STABLE_JIFFIES 6
++
++static int keyevent_enable = 0;
++static int keypad_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data);
++static struct notifier_block notifier_freq_block = {
++ .notifier_call = keypad_notifier_freq,
++};
++
++static struct dvfm_lock dvfm_lock = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .dev_idx = -1,
++ .count = 0,
++};
++
++static struct timer_list kp_timer;
++#endif
++
+ struct pxa27x_keypad {
+ struct pxa27x_keypad_platform_data *pdata;
+
+@@ -334,6 +363,8 @@
+ struct pxa27x_keypad *keypad = dev_id;
+ unsigned long kpc = keypad_readl(KPC);
+
++ printk("-- irq handled --\n");
++
+ if (kpc & KPC_DI)
+ pxa27x_keypad_scan_direct(keypad);
+
+@@ -402,6 +433,87 @@
+ clk_disable(keypad->clk);
+ }
+
++#if defined(CONFIG_PXA3xx_DVFM)
++static void set_dvfm_constraint(void)
++{
++ spin_lock_irqsave(&dvfm_lock.lock, dvfm_lock.flags);
++ if (dvfm_lock.count++ == 0) {
++ /* Disable lowpower mode */
++ dvfm_disable_op_name("D1", dvfm_lock.dev_idx);
++ dvfm_disable_op_name("D2", dvfm_lock.dev_idx);
++ if (cpu_is_pxa935())
++ dvfm_disable_op_name("CG", dvfm_lock.dev_idx);
++ }
++ spin_unlock_irqrestore(&dvfm_lock.lock, dvfm_lock.flags);
++}
++
++static void unset_dvfm_constraint(void)
++{
++ spin_lock_irqsave(&dvfm_lock.lock, dvfm_lock.flags);
++ if (dvfm_lock.count == 0) {
++ printk(KERN_WARNING "Keypad constraint has been removed.\n");
++ } else if (--dvfm_lock.count == 0) {
++ /* Enable lowpower mode */
++ dvfm_enable_op_name("D1", dvfm_lock.dev_idx);
++ dvfm_enable_op_name("D2", dvfm_lock.dev_idx);
++ if (cpu_is_pxa935())
++ dvfm_enable_op_name("CG", dvfm_lock.dev_idx);
++ }
++ spin_unlock_irqrestore(&dvfm_lock.lock, dvfm_lock.flags);
++}
++
++/*
++ * FIXME: A timer is used here to disable entering D1/D2 for a while,
++ * because a keypad event wakes the system from D1/D2 mode but the keypad
++ * device can't detect the interrupt while it is still in standby state.
++ * The keypad device needs time to detect it again, so we use a timer here.
++ * D1/D2 idle is determined by idle time; it would be better to combine
++ * these timers.
++ */
++static void keypad_timer_handler(unsigned long data)
++{
++ unset_dvfm_constraint();
++}
++
++extern void get_wakeup_source(pm_wakeup_src_t *);
++
++static int keypad_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct dvfm_freqs *freqs = (struct dvfm_freqs *)data;
++ struct op_info *new = NULL;
++ struct dvfm_md_opt *op;
++ pm_wakeup_src_t src;
++
++ if (freqs)
++ new = &freqs->new_info;
++ else
++ return 0;
++
++ op = (struct dvfm_md_opt *)new->op;
++ if (val == DVFM_FREQ_POSTCHANGE) {
++ if ((op->power_mode == POWER_MODE_D1) ||
++ (op->power_mode == POWER_MODE_D2) ||
++ (op->power_mode == POWER_MODE_CG)) {
++ //get_wakeup_source(&src);
++ //if (src.bits.mkey || src.bits.dkey) {
++			/* If a keypad event happens and wakes the system
++			 * from D1/D2, disable D1/D2 to keep the keypad
++			 * working for a while.
++ */
++ kp_timer.expires = jiffies + D2_STABLE_JIFFIES;
++ add_timer(&kp_timer);
++ set_dvfm_constraint();
++ #ifdef CONFIG_ANDROID_POWER
++ android_lock_suspend_auto_expire(&pxa27x_keypad_suspend_lock, D2_STABLE_JIFFIES);
++ #endif
++ //}
++ }
++ }
++ return 0;
++}
++#endif
++
+ #ifdef CONFIG_PM
+ static int pxa27x_keypad_suspend(struct device *dev)
+ {
+@@ -410,8 +522,10 @@
+
+ clk_disable(keypad->clk);
+
+- if (device_may_wakeup(&pdev->dev))
++ if (device_may_wakeup(&pdev->dev)) {
++ printk("-- keypad wake set %d\n",keypad->irq);
+ enable_irq_wake(keypad->irq);
++ }
+
+ return 0;
+ }
+@@ -495,6 +609,15 @@
+ goto failed_free_mem;
+ }
+
++#if defined(CONFIG_PXA3xx_DVFM)
++ dvfm_register("Keypad", &dvfm_lock.dev_idx);
++ dvfm_register_notifier(&notifier_freq_block,
++ DVFM_FREQUENCY_NOTIFIER);
++ init_timer(&kp_timer);
++ kp_timer.function = keypad_timer_handler;
++ kp_timer.data = 0;
++#endif
++
+ keypad->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(keypad->clk)) {
+ dev_err(&pdev->dev, "failed to get keypad clock\n");
+@@ -596,11 +719,18 @@
+
+ static int __init pxa27x_keypad_init(void)
+ {
++#ifdef CONFIG_ANDROID_POWER
++ pxa27x_keypad_suspend_lock.name = "pxa27x_keypad";
++ android_init_suspend_lock(&pxa27x_keypad_suspend_lock);
++#endif
+ return platform_driver_register(&pxa27x_keypad_driver);
+ }
+
+ static void __exit pxa27x_keypad_exit(void)
+ {
++#ifdef CONFIG_ANDROID_POWER
++ android_uninit_suspend_lock(&pxa27x_keypad_suspend_lock);
++#endif
+ platform_driver_unregister(&pxa27x_keypad_driver);
+ }
+
+diff -ur linux-2.6.32/drivers/mmc/host/pxamci.c kernel/drivers/mmc/host/pxamci.c
+--- linux-2.6.32/drivers/mmc/host/pxamci.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/mmc/host/pxamci.c 2009-12-12 16:09:33.709612828 +0200
+@@ -127,9 +127,10 @@
+ break;
+ udelay(1);
+ } while (timeout--);
+-
++ /*
+ if (v & STAT_CLK_EN)
+ dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
++ */
+ }
+ }
+
+diff -ur linux-2.6.32/drivers/net/wireless/libertas/if_spi.c kernel/drivers/net/wireless/libertas/if_spi.c
+--- linux-2.6.32/drivers/net/wireless/libertas/if_spi.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/net/wireless/libertas/if_spi.c 2009-12-12 16:09:35.289611714 +0200
+@@ -1020,9 +1020,9 @@
+ lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
+ return -EAFNOSUPPORT;
+ }
+- snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
++ snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "gspi%d_hlp.bin",
+ chip_id_to_device_name[i].name);
+- snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
++ snprintf(main_fw, IF_SPI_FW_NAME_MAX, "gspi%d.bin",
+ chip_id_to_device_name[i].name);
+ return 0;
+ }
+diff -ur linux-2.6.32/drivers/power/Kconfig kernel/drivers/power/Kconfig
+--- linux-2.6.32/drivers/power/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/power/Kconfig 2009-12-12 16:09:35.736280931 +0200
+@@ -110,4 +110,10 @@
+ help
+ Say Y to include support for NXP PCF50633 Main Battery Charger.
+
++config BATTERY_SGH
++ tristate "SGH battery driver"
++ depends on I2C
++ help
++	  Say Y here to enable support for the PM6558 (I2C) battery monitor chip used on the Samsung I780/I900.
++
+ endif # POWER_SUPPLY
+diff -ur linux-2.6.32/drivers/power/Makefile kernel/drivers/power/Makefile
+--- linux-2.6.32/drivers/power/Makefile 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/power/Makefile 2009-12-12 16:09:35.736280931 +0200
+@@ -29,3 +29,5 @@
+ obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
+ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
++obj-$(CONFIG_BATTERY_SGH) += sgh_battery.o
++
+diff -ur linux-2.6.32/drivers/power/sgh_battery.c kernel/drivers/power/sgh_battery.c
+--- linux-2.6.32/drivers/power/sgh_battery.c 2009-12-13 13:03:35.181931149 +0200
++++ kernel/drivers/power/sgh_battery.c 2009-12-12 16:09:35.746280509 +0200
+@@ -0,0 +1,474 @@
++/*
++ * Samsung I780/I900 battery driver
++ *
++ * Copyright (C) 2009 Sacha Refshauge <xsacha@gmail.com>
++ *
++ * Based on the BQ27x00 battery driver:
++ * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
++ * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
++ *
++ * which was based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ *
++ */
++#include <linux/module.h>
++#include <linux/param.h>
++#include <linux/jiffies.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/power_supply.h>
++#include <linux/idr.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#define DRIVER_VERSION "1.0.1"
++
++#define SGH_CHARGE_GPIO 88
++
++#define SGH_BATT_REG_TEMP 0x25
++#define SGH_BATT_REG_VDIFF 0x23
++#define SGH_BATT_REG_VOLT 0x21
++
++/* Reg Table
++This is what is known of the register banks in PM6558 by
++observation. Assumed that all registers are WORDs, so
++address increases by 2. Also assumed that all registers
++are 12-bit right justified (& 0xFFF).
++
++Register Task Value
++0x21 Voltage The voltage
++0x23 Charging True if charging, False if not charging
++0x25 Temperature The temperature (which is used to determine charge)
++0xC2 Shutdown Write-only register
++
++*/
++
++struct sgh_batt_device_info;
++struct sgh_batt_access_methods {
++ int (*read)(u8 reg, int *rt_value, int b_single,
++ struct sgh_batt_device_info *di);
++};
++struct sgh_batt_device_info {
++ struct device *dev;
++ int id;
++ int voltage_uV;
++ int current_uA;
++ int temp_C;
++ int charge_rsoc;
++ struct sgh_batt_access_methods *bus;
++ struct power_supply bat;
++ struct power_supply bat_ac;
++ struct power_supply bat_usb;
++
++ struct i2c_client *client;
++
++ struct delayed_work work;
++ struct workqueue_struct *wqueue;
++
++ int voltage;
++ int voltage_sum;
++ int voltage_count;
++ int capacity;
++ int charging_status;
++ int poll_count;
++};
++
++
++static enum power_supply_property sgh_batt_battery_props[] = {
++ POWER_SUPPLY_PROP_STATUS,
++ POWER_SUPPLY_PROP_HEALTH,
++ POWER_SUPPLY_PROP_TECHNOLOGY,
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_BATT_VOL,
++ POWER_SUPPLY_PROP_CAPACITY,
++ POWER_SUPPLY_PROP_BATT_TEMP,
++};
++
++static enum power_supply_property sgh_batt_power_props[] = {
++ POWER_SUPPLY_PROP_ONLINE,
++};
++
++static int sgh_batt_read(u8 reg, int *rt_value, struct sgh_batt_device_info *di)
++{
++ struct i2c_client *client = di->client;
++
++ *rt_value = be16_to_cpu(i2c_smbus_read_word_data(client, reg));
++ *rt_value = *rt_value & 0xFFF;
++
++ return 0;
++}
++
++/*
++ * Return the battery voltage in millivolts
++ *
++ */
++static int sgh_batt_get_voltage(struct sgh_batt_device_info *di)
++{
++ int i;
++ int voltages[5];
++ int voltage = 0, largest = 0, smallest = 0;
++
++ for(i = 0; i < 5; i++)
++ {
++ sgh_batt_read(SGH_BATT_REG_VOLT, &voltages[i], di);
++ if (voltages[i] > voltages[largest])
++ largest = i;
++
++ if (voltages[i] < voltages[smallest])
++ smallest = i;
++ }
++ for(i = 0; i < 5; i++)
++ {
++ if (i != smallest && i != largest)
++ voltage += voltages[i];
++ }
++
++ voltage /= 3;
++
++ if(di->voltage_count < 10) {
++ di->voltage_sum += voltage;
++ di->voltage_count++;
++ voltage = di->voltage_sum / di->voltage_count;
++ } else {
++ di->voltage_sum = di->voltage_sum - di->voltage + voltage;
++ voltage = di->voltage_sum / 10;
++ }
++
++ return voltage;
++}
++
++/*
++ * Return the battery temperature in (10x) degrees Celsius.
++ *
++ * From Windows Mobile:
++ * Temp Sample [ Min: 0x21B, 0x368, 0x89e : Max]
++ * 539 872 2206
++ */
++static int sgh_batt_get_temp(struct sgh_batt_device_info *di)
++{
++ int temp = 0;
++
++ sgh_batt_read(SGH_BATT_REG_TEMP, &temp, di);
++
++ return temp >> 2;
++}
++
++/*
++ * Return the battery charge in percentage.
++ */
++static int sgh_batt_get_charge(struct sgh_batt_device_info *di)
++{
++ int volt = di->voltage;
++ int i, k = 0, d = 10;
++ int ndist, tdist;
++ int vsamp[] = {0xe38, 0xdb6, 0xd66, 0xd25, 0xce4, 0xc94, 0xb79};
++ // Charging applies a greater voltage. USB: ~0x30 AC: ~0x60
++ // volt -= be16_to_cpu(i2c_smbus_read_word_data(di->client, SGH_BATT_REG_VDIFF)) & 0xFFF; // FIXME
++
++ /* Use voltage to work out charge.
++ Closer to 100%, the voltage has less impact on gradient (linear).
++ Whereas closer to 0%, it is purely the gradient.
++ */
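++	/* The fall-through switch below makes the vsamp[] thresholds map to the
++	 * following capacity bands (derived by tracing the cases): below 0xb79
++	 * -> 0%, 0xb79-0xc94 -> 0-9%, 0xc94-0xce4 -> 10-19%, 0xce4-0xd25 ->
++	 * 20-39%, 0xd25-0xd66 -> 40-59%, 0xd66-0xdb6 -> 60-79%, 0xdb6-0xe38 ->
++	 * 80-99%, and 0xe38 or above -> 100%, interpolating linearly in between.
++	 */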
++ for (i = 6; i >= 0; i--)
++ {
++ if (volt < vsamp[i])
++ {
++ switch (i) {
++ case 0:
++ k = k + 1;
++ case 1:
++ k = k + 1;
++ case 2:
++ k = k + 1;
++ case 3:
++ d = d >> 1;
++ case 4:
++ k = k + 1;
++ case 5:
++ ndist = 100 * (volt - vsamp[i+1]);
++ tdist = (vsamp[i] - vsamp[i+1]);
++ volt = (k * 100) + (ndist / tdist);
++ return volt / d;
++ default:
++ return 0;
++ }
++ }
++ }
++ return 100;
++}
++
++#define to_sgh_batt_device_info(x) container_of((x), \
++ struct sgh_batt_device_info, bat);
++
++static int sgh_batt_battery_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct sgh_batt_device_info *di = to_sgh_batt_device_info(psy);
++ switch (psp) {
++
++ case POWER_SUPPLY_PROP_BATT_VOL:
++ val->intval = sgh_batt_get_voltage(di);
++ break;
++
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = true; // Device can't run without it
++ break;
++ case POWER_SUPPLY_PROP_CAPACITY:
++ val->intval = di->capacity;
++ break;
++ case POWER_SUPPLY_PROP_BATT_TEMP:
++ val->intval = sgh_batt_get_temp(di);
++ break;
++
++ case POWER_SUPPLY_PROP_STATUS:
++ val->intval = di->charging_status ? POWER_SUPPLY_STATUS_CHARGING : POWER_SUPPLY_STATUS_NOT_CHARGING;
++ break;
++ case POWER_SUPPLY_PROP_TECHNOLOGY:
++ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
++ break;
++ case POWER_SUPPLY_PROP_HEALTH:
++ val->intval = POWER_SUPPLY_HEALTH_GOOD;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int sgh_batt_power_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct sgh_batt_device_info *di = to_sgh_batt_device_info(psy);
++ switch (psp) {
++ case POWER_SUPPLY_PROP_ONLINE:
++ if (psy->type == POWER_SUPPLY_TYPE_MAINS)
++ val->intval = (di->charging_status == 1) ? 1 : 0;
++ else if (psy->type == POWER_SUPPLY_TYPE_USB)
++ val->intval = (di->charging_status == 2) ? 1 : 0;
++ else val->intval = 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void sgh_batt_battery_update(struct power_supply *psy)
++{
++ int charging_status;
++ struct sgh_batt_device_info *di = to_sgh_batt_device_info(psy);
++ charging_status = di->charging_status;
++
++ di->poll_count++;
++ if(gpio_get_value(SGH_CHARGE_GPIO)) {
++ di->charging_status = 0; //not charging
++ } else {
++ di->charging_status = 1; //ac
++ //TODO: Detect usb
++ }
++
++ if(di->charging_status != charging_status || di->poll_count >= 5) {
++ if(di->charging_status != charging_status)
++ di->voltage_sum = di->voltage_count = 0;
++
++ di->voltage = sgh_batt_get_voltage(di);
++ di->capacity = sgh_batt_get_charge(di);
++
++ printk("pwr: V:%x C:%d\n",di->voltage,di->capacity);
++
++ power_supply_changed(psy);
++ di->poll_count = 0;
++ }
++
++ /*
++ di->charging_status = gpio_get_value(SGH_CHARGE_GPIO) ?
++ POWER_SUPPLY_STATUS_NOT_CHARGING :
++ POWER_SUPPLY_STATUS_CHARGING;
++ */
++/*
++ if (di->charging_status != charging_status)
++ {
++ di->reset_avg = 1;
++ di->poll_count = 0xff;
++ }
++
++ if(di->poll_count >= 10) {
++ di->poll_count = 0;
++ power_supply_changed(psy);
++ }
++*/
++}
++
++static void sgh_batt_battery_work(struct work_struct *work)
++{
++ struct sgh_batt_device_info *di = container_of(work, struct sgh_batt_device_info, work.work);
++
++ sgh_batt_battery_update(&di->bat);
++ queue_delayed_work(di->wqueue, &di->work, HZ*5);
++}
++
++static char *supply_list[] = {
++ "battery",
++};
++
++static void sgh_powersupply_init(struct sgh_batt_device_info *di) {
++ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
++ di->bat.properties = sgh_batt_battery_props;
++ di->bat.num_properties = ARRAY_SIZE(sgh_batt_battery_props);
++ di->bat.get_property = sgh_batt_battery_get_property;
++ di->bat.external_power_changed = NULL;
++}
++
++static void sgh_powersupply_power_init(struct power_supply *bat,int is_usb) {
++ bat->name = is_usb ? "usb" : "ac";
++ bat->type = is_usb ? POWER_SUPPLY_TYPE_USB : POWER_SUPPLY_TYPE_MAINS;
++ bat->supplied_to = supply_list;
++ bat->num_supplicants = ARRAY_SIZE(supply_list);
++ bat->properties = sgh_batt_power_props;
++ bat->num_properties = ARRAY_SIZE(sgh_batt_power_props);
++ bat->get_property = sgh_batt_power_get_property;
++}
++
++static int sgh_batt_battery_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct sgh_batt_device_info *di;
++ struct sgh_batt_access_methods *bus;
++ int retval = 0;
++
++ retval = gpio_request(SGH_CHARGE_GPIO, "BATT CHRG");
++ if (retval)
++ goto batt_failed_0;
++
++ di = kzalloc(sizeof(*di), GFP_KERNEL);
++ if (!di) {
++ dev_err(&client->dev, "failed to allocate device info data\n");
++ retval = -ENOMEM;
++ return retval;
++ }
++
++ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
++ if (!bus) {
++ dev_err(&client->dev, "failed to allocate access method "
++ "data\n");
++ retval = -ENOMEM;
++ goto batt_failed_1;
++ }
++
++ i2c_set_clientdata(client, di);
++ di->dev = &client->dev;
++ di->bat.name = "battery"; // Android only looks for this
++ di->bus = bus;
++ di->client = client;
++ di->poll_count = 0;
++ sgh_powersupply_init(di);
++ retval = power_supply_register(&client->dev, &di->bat);
++ if (retval) {
++ dev_err(&client->dev, "failed to register battery\n");
++ goto batt_failed_2;
++ }
++
++ sgh_powersupply_power_init(&di->bat_ac,0);
++ retval = power_supply_register(&client->dev, &di->bat_ac);
++ if (retval) {
++ dev_err(&client->dev, "failed to register battery (ac)\n");
++ goto batt_failed_2;
++ }
++
++ sgh_powersupply_power_init(&di->bat_usb,1);
++ retval = power_supply_register(&client->dev, &di->bat_usb);
++ if (retval) {
++ dev_err(&client->dev, "failed to register battery (usb)\n");
++ goto batt_failed_2;
++ }
++
++ INIT_DELAYED_WORK(&di->work, sgh_batt_battery_work);
++ di->wqueue = create_singlethread_workqueue("battery");
++ queue_delayed_work(di->wqueue, &di->work, 1);
++
++ dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
++
++ return 0;
++
++batt_failed_2:
++ kfree(bus);
++batt_failed_1:
++ kfree(di);
++batt_failed_0:
++
++ return retval;
++}
++
++static int sgh_batt_battery_remove(struct i2c_client *client)
++{
++ struct sgh_batt_device_info *di = i2c_get_clientdata(client);
++
++ cancel_rearming_delayed_workqueue(di->wqueue,
++ &di->work);
++ destroy_workqueue(di->wqueue);
++
++ gpio_free(SGH_CHARGE_GPIO);
++
++ power_supply_unregister(&di->bat);
++
++	/* di->bat.name points at a string literal, so there is nothing to kfree here */
++
++ kfree(di);
++
++ return 0;
++}
++
++
++/*
++ * Module stuff
++ */
++
++static const struct i2c_device_id sgh_batt_id[] = {
++ { "sgh_battery", 0 },
++ {},
++};
++
++static struct i2c_driver sgh_batt_battery_driver = {
++ .driver = {
++ .name = "battery",
++ },
++ .probe = sgh_batt_battery_probe,
++ .remove = sgh_batt_battery_remove,
++ .suspend = NULL,
++ .resume = NULL, //todo: power management
++ .id_table = sgh_batt_id,
++};
++
++static int __init sgh_batt_battery_init(void)
++{
++ int ret;
++
++ ret = i2c_add_driver(&sgh_batt_battery_driver);
++ if (ret)
++ printk(KERN_ERR "Unable to register Samsung I780/I900 driver\n");
++
++ return ret;
++}
++module_init(sgh_batt_battery_init);
++
++static void __exit sgh_batt_battery_exit(void)
++{
++ i2c_del_driver(&sgh_batt_battery_driver);
++}
++module_exit(sgh_batt_battery_exit);
++
++MODULE_AUTHOR("Sacha Refshauge <xsacha@gmail.com>");
++MODULE_DESCRIPTION("Samsung I780/I900 battery monitor driver");
++MODULE_LICENSE("GPL");
+diff -ur linux-2.6.32/drivers/video/pxafb.c kernel/drivers/video/pxafb.c
+--- linux-2.6.32/drivers/video/pxafb.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/drivers/video/pxafb.c 2009-12-13 14:40:18.638427304 +0200
+@@ -62,6 +62,11 @@
+ #include <mach/bitfield.h>
+ #include <mach/pxafb.h>
+
++#ifdef CONFIG_PXA3xx_DVFM
++#include <mach/dvfm.h>
++#include <mach/pxa3xx_dvfm.h>
++#endif
++
+ /*
+ * Complain if VAR is out of range.
+ */
+@@ -86,6 +91,19 @@
+
+ static unsigned long video_mem_size = 0;
+
++#ifdef CONFIG_PXA3xx_DVFM
++static int fb_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data);
++static struct notifier_block notifier_freq_block = {
++ .notifier_call = fb_notifier_freq,
++};
++
++static void *dev_id = NULL;
++
++static int hss = 0;
++static int pxafb_adjust_pcd(struct pxafb_info *fbi, int hss);
++#endif
++
+ static inline unsigned long
+ lcd_readl(struct pxafb_info *fbi, unsigned int off)
+ {
+@@ -1468,6 +1486,11 @@
+ lcd_writel(fbi, LCSR1, lcsr1);
+ }
+ #endif
++ if( lcsr & (LCSR_BS|LCSR_SOF))
++ {
++ wake_up(&fbi->ctrlr_wait);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+@@ -1642,7 +1665,7 @@
+ {
+ struct pxafb_info *fbi = dev_get_drvdata(dev);
+
+- set_ctrlr_state(fbi, C_DISABLE_PM);
++ //set_ctrlr_state(fbi, C_DISABLE_PM);
+ return 0;
+ }
+
+@@ -1650,7 +1673,7 @@
+ {
+ struct pxafb_info *fbi = dev_get_drvdata(dev);
+
+- set_ctrlr_state(fbi, C_ENABLE_PM);
++ //set_ctrlr_state(fbi, C_ENABLE_PM);
+ return 0;
+ }
+
+@@ -1660,6 +1683,87 @@
+ };
+ #endif
+
++#ifdef CONFIG_PXA3xx_DVFM
++static int dvfm_dev_idx;
++static void set_dvfm_constraint(void)
++{
++ /* Disable Lowpower mode */
++ /* Remove D0CS constraint since LCCR3_STALL is set */
++// dvfm_disable_op_name("D0CS", dvfm_dev_idx);
++ dvfm_disable_op_name("D1", dvfm_dev_idx);
++ dvfm_disable_op_name("D2", dvfm_dev_idx);
++ if (cpu_is_pxa935())
++ dvfm_disable_op_name("CG", dvfm_dev_idx);
++}
++
++static void unset_dvfm_constraint(void)
++{
++ /* Enable Lowpower mode */
++ /* Remove D0CS constraint since LCCR3_STALL is set */
++// dvfm_enable_op_name("D0CS", dvfm_dev_idx);
++ dvfm_enable_op_name("D1", dvfm_dev_idx);
++ dvfm_enable_op_name("D2", dvfm_dev_idx);
++ if (cpu_is_pxa935())
++ dvfm_enable_op_name("CG", dvfm_dev_idx);
++}
++
++static int fb_notifier_freq(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct dvfm_freqs *freqs = (struct dvfm_freqs *)data;
++ struct op_info *new = NULL;
++ struct dvfm_md_opt *op;
++/*
++ if (freqs) {
++ new = &freqs->new_info;
++ } else
++ return 0;
++
++ op = (struct dvfm_md_opt *)new->op;
++ switch (val) {
++ case DVFM_FREQ_PRECHANGE:
++ if ((op->power_mode == POWER_MODE_D0) ||
++ (op->power_mode == POWER_MODE_D0CS))
++ hss = op->hss;
++ else if ((op->power_mode == POWER_MODE_D1) ||
++ (op->power_mode == POWER_MODE_D2) ||
++ (op->power_mode == POWER_MODE_CG))
++ lcd_update = 0;
++ break;
++ case DVFM_FREQ_POSTCHANGE:
++ if ((op->power_mode == POWER_MODE_D1) ||
++ (op->power_mode == POWER_MODE_D2) ||
++ (op->power_mode == POWER_MODE_CG))
++ lcd_update = 1;
++ break;
++ }
++*/
++ return 0;
++}
++
++static int pxafb_adjust_pcd(struct pxafb_info *fbi, int hss)
++{
++
++ return 0;
++}
++
++void pxafb_set_pcd(void)
++{
++/*
++ struct pxafb_info *fbi = (struct pxafb_info *)dev_id;
++
++ if (fbi)
++ pxafb_adjust_pcd(fbi, hss);
++*/
++ return;
++}
++
++EXPORT_SYMBOL(pxafb_set_pcd);
++#else
++static void set_dvfm_constraint(void) {}
++static void unset_dvfm_constraint(void) {}
++#endif
++
+ static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
+ {
+ int size = PAGE_ALIGN(fbi->video_mem_size);
+
+diff -ur linux-2.6.32/include/linux/input.h kernel/include/linux/input.h
+--- linux-2.6.32/include/linux/input.h 2009-12-03 05:51:21.000000000 +0200
++++ kernel/include/linux/input.h 2009-12-12 16:09:40.056274324 +0200
+@@ -333,6 +333,7 @@
+ #define KEY_BASSBOOST 209
+ #define KEY_PRINT 210 /* AC Print */
+ #define KEY_HP 211
++#define KEY_CAMERAFOCUS 211
+ #define KEY_CAMERA 212
+ #define KEY_SOUND 213
+ #define KEY_QUESTION 214
+diff -ur linux-2.6.32/include/linux/power_supply.h kernel/include/linux/power_supply.h
+--- linux-2.6.32/include/linux/power_supply.h 2009-12-03 05:51:21.000000000 +0200
++++ kernel/include/linux/power_supply.h 2009-12-12 16:09:40.166275516 +0200
+@@ -113,6 +113,8 @@
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
++ POWER_SUPPLY_PROP_BATT_VOL,
++ POWER_SUPPLY_PROP_BATT_TEMP,
+ /* Properties of type `const char *' */
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+diff -ur linux-2.6.32/include/linux/time.h kernel/include/linux/time.h
+--- linux-2.6.32/include/linux/time.h 2009-12-03 05:51:21.000000000 +0200
++++ kernel/include/linux/time.h 2009-12-12 16:09:40.246280545 +0200
+@@ -107,6 +107,7 @@
+ extern int no_sync_cmos_clock __read_mostly;
+ void timekeeping_init(void);
+ extern int timekeeping_suspended;
++extern void update_sleep_time(struct timespec ts);
+
+ unsigned long get_seconds(void);
+ struct timespec current_kernel_time(void);
+diff -ur linux-2.6.32/kernel/printk.c kernel/kernel/printk.c
+--- linux-2.6.32/kernel/printk.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/kernel/printk.c 2009-12-12 16:09:40.512534939 +0200
+@@ -257,6 +257,53 @@
+ #endif
+
+ /*
++ * Return the number of unread characters in the log buffer.
++ */
++static int log_buf_get_len(void)
++{
++ return logged_chars;
++}
++
++/*
++ * Clears the ring-buffer
++ */
++void log_buf_clear(void)
++{
++ logged_chars = 0;
++}
++
++/*
++ * Copy a range of characters from the log buffer.
++ */
++int log_buf_copy(char *dest, int idx, int len)
++{
++ int ret, max;
++ bool took_lock = false;
++
++ if (!oops_in_progress) {
++ spin_lock_irq(&logbuf_lock);
++ took_lock = true;
++ }
++
++ max = log_buf_get_len();
++ if (idx < 0 || idx >= max) {
++ ret = -1;
++ } else {
++ if (len > max - idx)
++ len = max - idx;
++ ret = len;
++ idx += (log_end - max);
++ while (len-- > 0)
++ dest[len] = LOG_BUF(idx + len);
++ }
++
++ if (took_lock)
++ spin_unlock_irq(&logbuf_lock);
++
++ return ret;
++}
++
++/*
+ * Commands to do_syslog:
+ *
+ * 0 -- Close the log. Currently a NOP.
+@@ -1405,3 +1452,4 @@
+ }
+ EXPORT_SYMBOL(printk_timed_ratelimit);
+ #endif
++
+diff -ur linux-2.6.32/kernel/time/timekeeping.c kernel/kernel/time/timekeeping.c
+--- linux-2.6.32/kernel/time/timekeeping.c 2009-12-03 05:51:21.000000000 +0200
++++ kernel/kernel/time/timekeeping.c 2009-12-12 16:09:40.559199813 +0200
+@@ -886,3 +886,14 @@
+ now.tv_nsec + mono.tv_nsec);
+ return now;
+ }
++
++void update_sleep_time(struct timespec ts)
++{
++ long wtm_sec, wtm_nsec;
++ wtm_sec = wall_to_monotonic.tv_sec - ts.tv_sec;
++ wtm_nsec = wall_to_monotonic.tv_nsec - ts.tv_nsec;
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++ set_normalized_timespec(&total_sleep_time,
++ (ts.tv_sec + total_sleep_time.tv_sec),
++ (ts.tv_nsec + total_sleep_time.tv_nsec));
++}
+diff -ur linux-2.6.32/sound/soc/pxa/Kconfig kernel/sound/soc/pxa/Kconfig
+--- linux-2.6.32/sound/soc/pxa/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/sound/soc/pxa/Kconfig 2009-12-12 16:09:41.692528097 +0200
+@@ -144,3 +144,14 @@
+ help
+ Say Y if you want to add support for SoC audio on the
+ IMote 2.
++
++config SND_SOC_SGH
++ tristate "SoC Audio support for Samsung SGH I900"
++ depends on SND_PXA2XX_SOC && MACH_SGH_I900
++ select SND_PXA2XX_SOC_AC97
++ select SND_PXA_SOC_SSP
++ select SND_SOC_WM9713
++ help
++ Say Y if you want to add support for SoC audio on the
++ Samsung SGH I900 mobile phone.
++
+diff -ur linux-2.6.32/sound/soc/pxa/Makefile kernel/sound/soc/pxa/Makefile
+--- linux-2.6.32/sound/soc/pxa/Makefile 2009-12-03 05:51:21.000000000 +0200
++++ kernel/sound/soc/pxa/Makefile 2009-12-12 16:09:41.692528097 +0200
+@@ -23,6 +23,7 @@
+ snd-soc-magician-objs := magician.o
+ snd-soc-mioa701-objs := mioa701_wm9713.o
+ snd-soc-imote2-objs := imote2.o
++snd-soc-sgh-objs := sgh.o
+
+ obj-$(CONFIG_SND_PXA2XX_SOC_CORGI) += snd-soc-corgi.o
+ obj-$(CONFIG_SND_PXA2XX_SOC_POODLE) += snd-soc-poodle.o
+@@ -37,3 +38,4 @@
+ obj-$(CONFIG_SND_PXA2XX_SOC_MIOA701) += snd-soc-mioa701.o
+ obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o
+ obj-$(CONFIG_SND_PXA2XX_SOC_IMOTE2) += snd-soc-imote2.o
++obj-$(CONFIG_SND_SOC_SGH) += snd-soc-sgh.o
+diff -ur linux-2.6.32/sound/soc/pxa/sgh.c kernel/sound/soc/pxa/sgh.c
+--- linux-2.6.32/sound/soc/pxa/sgh.c 2009-12-13 13:07:09.965238502 +0200
++++ kernel/sound/soc/pxa/sgh.c 2009-12-12 16:09:41.695861483 +0200
+@@ -0,0 +1,310 @@
++/*
++ * SoC audio support for the Samsung I780/I900 phones
++ *
++ * Copyright (C) 2009 Mustafa Ozsakalli
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation in version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/delay.h>
++#include <linux/irq.h>
++
++#include <asm/mach-types.h>
++#include <mach/audio.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/initval.h>
++#include <sound/ac97_codec.h>
++
++#include "pxa2xx-pcm.h"
++#include "pxa2xx-ac97.h"
++#include "../codecs/wm9713.h"
++#include "pxa-ssp.h"
++
++#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
++
++#define SGH_I780_AUDIO_GPIO 0x13
++#define SGH_I900_AUDIO_GPIO 0x11
++
++static const struct snd_soc_dapm_widget sgh_dapm_widgets[] = {
++ SND_SOC_DAPM_SPK("Front Speaker", NULL),
++ SND_SOC_DAPM_HP("Headset", NULL),
++ SND_SOC_DAPM_LINE("GSM Line Out", NULL),
++ SND_SOC_DAPM_LINE("GSM Line In", NULL),
++ SND_SOC_DAPM_LINE("Radio Line Out", NULL),
++ SND_SOC_DAPM_MIC("Front Mic", NULL),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ /* Microphone */
++ {"MIC1", NULL, "Front Mic"},
++
++ /* Speaker */
++ {"Front Speaker", NULL, "SPKL"},
++ {"Front Speaker", NULL, "SPKR"},
++
++ /* Earpiece */
++ {"Headset", NULL, "HPL"},
++ {"Headset", NULL, "HPR"},
++
++ /* GSM Module */
++ {"MONOIN", NULL, "GSM Line Out"},
++ {"PCBEEP", NULL, "GSM Line Out"},
++ {"GSM Line In", NULL, "MONO"},
++
++ /* FM Radio Module */
++ {"LINEL", NULL, "Radio Line Out"},
++ {"LINER", NULL, "Radio Line Out"},
++};
++
++static int sgh_wm9713_init(struct snd_soc_codec *codec)
++{
++ unsigned short reg;
++
++ snd_soc_dapm_new_controls(codec, ARRAY_AND_SIZE(sgh_dapm_widgets));
++ snd_soc_dapm_add_routes(codec, ARRAY_AND_SIZE(audio_map));
++
++ snd_soc_dapm_enable_pin(codec, "Front Speaker");
++
++ snd_soc_dapm_sync(codec);
++
++
++ return 0;
++}
++
++static int sgh_hifi_startup(struct snd_pcm_substream *substream){
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
++
++ cpu_dai->playback.channels_min = 2;
++ cpu_dai->playback.channels_max = 2;
++
++ return 0;
++}
++
++static int sgh_hifi_prepare(struct snd_pcm_substream *substream) {
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_codec *codec = rtd->socdev->card->codec;
++ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
++ u16 reg;
++ int gpio = machine_is_sgh_i780() ? SGH_I780_AUDIO_GPIO : SGH_I900_AUDIO_GPIO;
++
++ codec->write(codec, AC97_POWERDOWN, 0);
++ mdelay(1);
++ codec_dai->ops->set_pll(codec_dai, 0, 4096000, 0);
++ schedule_timeout_interruptible(msecs_to_jiffies(10));
++ codec->write(codec, AC97_HANDSET_RATE, 0x0000);
++ schedule_timeout_interruptible(msecs_to_jiffies(10));
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ reg = AC97_PCM_FRONT_DAC_RATE;
++ else
++ reg = AC97_PCM_LR_ADC_RATE;
++ codec->write(codec, AC97_EXTENDED_STATUS, 0x1);
++ codec->write(codec, reg, substream->runtime->rate);
++
++ //Turn on external speaker
++ //TODO: Headset detection
++ gpio_set_value(gpio, 1);
++
++ return 0;
++}
++
++static void sgh_hifi_shutdown(struct snd_pcm_substream *substream) {
++ int gpio = machine_is_sgh_i780() ? SGH_I780_AUDIO_GPIO : SGH_I900_AUDIO_GPIO;
++ gpio_direction_output(gpio, 1);
++ gpio_set_value(gpio, 0);
++}
++
++static int sgh_voice_startup(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
++
++ cpu_dai->playback.channels_min = 1;
++ cpu_dai->playback.channels_max = 1;
++
++ return 0;
++};
++
++static int sgh_voice_prepare(struct snd_pcm_substream *substream)
++{
++#define WM9713_DR_8000 0x1F40 /* 8000 samples/sec */
++#define WM9713_DR_11025 0x2B11 /* 11025 samples/sec */
++#define WM9713_DR_12000 0x2EE0 /* 12000 samples/sec */
++#define WM9713_DR_16000 0x3E80 /* 16000 samples/sec */
++#define WM9713_DR_22050 0x5622 /* 22050 samples/sec */
++#define WM9713_DR_24000 0x5DC0 /* 24000 samples/sec */
++#define WM9713_DR_32000 0x7D00 /* 32000 samples/sec */
++#define WM9713_DR_44100 0xAC44 /* 44100 samples/sec */
++#define WM9713_DR_48000 0xBB80 /* 48000 samples/sec */
++
++ return 0;
++};
++
++static void sgh_voice_shutdown(struct snd_pcm_substream *substream)
++{
++
++};
++
++static struct snd_soc_ops sgh_ops[] = {
++{
++ .startup = sgh_hifi_startup,
++ .prepare = sgh_hifi_prepare,
++ .shutdown = sgh_hifi_shutdown,
++},
++{
++ .startup = sgh_voice_startup,
++ .prepare = sgh_voice_prepare,
++ .shutdown = sgh_voice_shutdown,
++},
++};
++
++static struct snd_soc_dai_link sgh_dai[] = {
++ {
++ .name = "AC97",
++ .stream_name = "AC97 HiFi",
++ .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_HIFI],
++ .codec_dai = &wm9713_dai[WM9713_DAI_AC97_HIFI],
++ .init = sgh_wm9713_init,
++ .ops = &sgh_ops[0],
++ },
++ {
++ .name = "AC97 Aux",
++ .stream_name = "AC97 Aux",
++ .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_AUX],
++ .codec_dai = &wm9713_dai[WM9713_DAI_AC97_AUX],
++ },
++ {
++ .name = "WM9713 Voice",
++ .stream_name = "WM9713 Voice",
++ .cpu_dai = &pxa_ssp_dai[PXA_DAI_SSP3],
++ .codec_dai = &wm9713_dai[WM9713_DAI_PCM_VOICE],
++ .ops = &sgh_ops[1],
++ },
++};
++
++static struct snd_soc_card sgh = {
++ .name = "SGHAudio",
++ .platform = &pxa2xx_soc_platform,
++ .dai_link = sgh_dai,
++ .num_links = ARRAY_SIZE(sgh_dai),
++};
++
++static struct snd_soc_device sgh_snd_devdata = {
++ .card = &sgh,
++ .codec_dev = &soc_codec_dev_wm9713,
++};
++
++static struct platform_device *sgh_snd_device;
++
++static int sgh_wm9713_probe(struct platform_device *pdev)
++{
++ int ret;
++ int gpio = machine_is_sgh_i780() ? SGH_I780_AUDIO_GPIO : SGH_I900_AUDIO_GPIO;
++
++ gpio_request(0x64, "WM9713 Power");
++ gpio_direction_output(0x64, 1);
++ gpio_set_value(0x64, 0);
++ mdelay(10);
++ gpio_set_value(0x64, 1);
++
++ gpio_request(gpio, "Speaker");
++ gpio_direction_output(gpio, 1);
++ gpio_set_value(gpio, 0);
++
++ sgh_snd_device = platform_device_alloc("soc-audio", -1);
++ if (!sgh_snd_device)
++ return -ENOMEM;
++
++ platform_set_drvdata(sgh_snd_device, &sgh_snd_devdata);
++ sgh_snd_devdata.dev = &sgh_snd_device->dev;
++
++ ret = platform_device_add(sgh_snd_device);
++ if (ret != 0)
++ platform_device_put(sgh_snd_device);
++
++ return ret;
++}
++
++static int __devexit sgh_wm9713_remove(struct platform_device *pdev)
++{
++ platform_device_unregister(sgh_snd_device);
++ return 0;
++}
++
++#ifdef CONFIG_PM
++
++static int sgh_wm9713_suspend(struct platform_device *pdev,
++ pm_message_t state)
++{
++ //struct snd_soc_card *card = platform_get_drvdata(pdev);
++ return 0;
++ //return snd_soc_card_suspend_pcms(card, state);
++}
++
++static int sgh_wm9713_resume(struct platform_device *pdev)
++{
++ //struct snd_soc_card *card = platform_get_drvdata(pdev);
++ return 0;
++ //return snd_soc_card_resume_pcms(card);
++}
++
++#else
++#define sgh_wm9713_suspend NULL
++#define sgh_wm9713_resume NULL
++#define sgh_wm9713_suspend_late NULL
++#define sgh_wm9713_resume_early NULL
++#endif
++
++static struct platform_driver sgh_wm9713_driver = {
++ .probe = sgh_wm9713_probe,
++ .remove = __devexit_p(sgh_wm9713_remove),
++ .suspend = sgh_wm9713_suspend,
++ .resume = sgh_wm9713_resume,
++ .driver = {
++ .name = "sgh-asoc",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init sgh_asoc_init(void)
++{
++ int ret;
++
++ ret = platform_driver_register(&sgh_wm9713_driver);
++
++ return ret;
++}
++
++static void __exit sgh_asoc_exit(void)
++{
++ platform_driver_unregister(&sgh_wm9713_driver);
++}
++
++module_init(sgh_asoc_init);
++module_exit(sgh_asoc_exit);
++
++/* Module information */
++MODULE_AUTHOR("Mustafa Ozsakalli (ozsakalli@hotmail.com)");
++MODULE_DESCRIPTION("ALSA SoC WM9713 Samsung SGH I780/I900");
++MODULE_LICENSE("GPL");
diff --git a/recipes/linux/linux-sgh-i900/sgh_i900_defconfig b/recipes/linux/linux-sgh-i900/sgh_i900_defconfig
index bca41c1090..60fc936cdd 100644
--- a/recipes/linux/linux-sgh-i900/sgh_i900_defconfig
+++ b/recipes/linux/linux-sgh-i900/sgh_i900_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.29
-# Thu Oct 1 16:49:04 2009
+# Linux kernel version: 2.6.32
+# Mon Dec 7 05:59:02 2009
#
CONFIG_ARM=y
CONFIG_HAVE_PWM=y
@@ -9,8 +9,6 @@ CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,14 +17,14 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_ARCH_MTD_XIP=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -47,11 +45,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
@@ -72,10 +71,12 @@ CONFIG_NAMESPACES=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
-CONFIG_PANIC_TIMEOUT=0
# CONFIG_EMBEDDED is not set
CONFIG_UID16=y
CONFIG_SYSCTL_SYSCALL=y
@@ -93,8 +94,12 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
-CONFIG_AIO=y
CONFIG_ASHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -102,13 +107,17 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -120,8 +129,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -142,18 +150,22 @@ CONFIG_FREEZER=y
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -162,24 +174,27 @@ CONFIG_FREEZER=y
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
# CONFIG_ARCH_KIRKWOOD is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
-# CONFIG_ARCH_MXC is not set
# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
# CONFIG_ARCH_PNX4008 is not set
CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_MSM is not set
-# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_BCMRING is not set
#
# Intel PXA2xx/PXA3xx Implementations
@@ -193,12 +208,16 @@ CONFIG_CPU_PXA310=y
# CONFIG_CPU_PXA320 is not set
# CONFIG_CPU_PXA930 is not set
# CONFIG_CPU_PXA935 is not set
+# CONFIG_CPU_PXA950 is not set
+CONFIG_PXA3xx_PMIC=y
# CONFIG_ARCH_GUMSTIX is not set
# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
# CONFIG_ARCH_LUBBOCK is not set
# CONFIG_MACH_LOGICPD_PXA270 is not set
# CONFIG_MACH_MAINSTONE is not set
# CONFIG_MACH_MP900C is not set
+# CONFIG_MACH_BALLOON3 is not set
# CONFIG_ARCH_PXA_IDP is not set
# CONFIG_PXA_SHARPSL is not set
# CONFIG_ARCH_VIPER is not set
@@ -206,23 +225,30 @@ CONFIG_CPU_PXA310=y
# CONFIG_TRIZEPS_PXA is not set
# CONFIG_MACH_H5000 is not set
# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
# CONFIG_MACH_ZYLONITE is not set
-CONFIG_MACH_SGH_I780=y
CONFIG_MACH_SGH_I900=y
+CONFIG_MACH_SGH_I780=y
# CONFIG_MACH_LITTLETON is not set
# CONFIG_MACH_TAVOREVB is not set
# CONFIG_MACH_SAAR is not set
# CONFIG_MACH_ARMCORE is not set
# CONFIG_MACH_CM_X300 is not set
+# CONFIG_MACH_H4700 is not set
# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_HIMALAYA is not set
# CONFIG_MACH_MIOA701 is not set
# CONFIG_MACH_PCM027 is not set
# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_MACH_CSB726 is not set
# CONFIG_PXA_EZX is not set
+# CONFIG_MACH_XCEP is not set
CONFIG_PXA3xx=y
CONFIG_PXA_SSP=y
-# CONFIG_PXA_PWM is not set
+CONFIG_PLAT_PXA=y
#
# Processor Type
@@ -231,7 +257,7 @@ CONFIG_CPU_32=y
CONFIG_CPU_XSC3=y
CONFIG_CPU_32v5=y
CONFIG_CPU_ABRT_EV5T=y
-CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_PABRT_LEGACY=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_TLB_V4WBI=y
CONFIG_CPU_CP15=y
@@ -246,6 +272,7 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_OUTER_CACHE=y
CONFIG_CACHE_XSC3L2=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
CONFIG_IWMMXT=y
CONFIG_COMMON_CLKDEV=y
@@ -261,19 +288,21 @@ CONFIG_COMMON_CLKDEV=y
#
CONFIG_TICK_ONESHOT=y
# CONFIG_NO_HZ is not set
-CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_HIGH_RES_TIMERS is not set
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_HZ=100
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
-CONFIG_ARCH_FLATMEM_HAS_HOLES=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -285,9 +314,12 @@ CONFIG_SPLIT_PTLOCK_CPUS=4096
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
#
# Boot options
@@ -302,25 +334,8 @@ CONFIG_ATAGS_PROC=y
#
# CPU Power Management
#
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_TABLE=y
-# CONFIG_CPU_FREQ_DEBUG is not set
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set
-# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
-CONFIG_CPU_FREQ_MIN_TICKS=10
-CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER=1000
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_LADDER=y
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
#
# Floating point emulation
@@ -360,13 +375,19 @@ CONFIG_EARLYSUSPEND=y
# CONFIG_CONSOLE_EARLYSUSPEND is not set
CONFIG_FB_EARLYSUSPEND=y
# CONFIG_APM_EMULATION is not set
+CONFIG_PM_RUNTIME=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_PXA_DVFM=y
+# CONFIG_PXA_MIPSRAM is not set
+CONFIG_PXA3xx_DVFM=y
+# CONFIG_PXA3xx_DVFM_STATS is not set
+# CONFIG_PXA3xx_PMU is not set
+# CONFIG_PERIPHERAL_STATUS is not set
CONFIG_NET=y
#
# Networking options
#
-CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -401,11 +422,11 @@ CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_ANDROID_PARANOID_NETWORK is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -419,6 +440,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+CONFIG_PHONET=m
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -433,26 +456,25 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
CONFIG_AF_RXRPC=m
# CONFIG_AF_RXRPC_DEBUG is not set
CONFIG_RXKAD=m
-CONFIG_PHONET=m
CONFIG_WIRELESS=y
CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
-CONFIG_NL80211=y
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEFAULT_PS_VALUE=1
+# CONFIG_CFG80211_DEBUGFS is not set
CONFIG_WIRELESS_OLD_REGULATORY=y
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
CONFIG_LIB80211=y
CONFIG_LIB80211_DEBUG=y
CONFIG_MAC80211=y
-
-#
-# Rate control algorithm selection
-#
CONFIG_MAC80211_RC_MINSTREL=y
# CONFIG_MAC80211_RC_DEFAULT_PID is not set
CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT="minstrel"
-CONFIG_MAC80211_MESH=y
+# CONFIG_MAC80211_MESH is not set
# CONFIG_MAC80211_LEDS is not set
# CONFIG_MAC80211_DEBUGFS is not set
# CONFIG_MAC80211_DEBUG_MENU is not set
@@ -468,21 +490,22 @@ CONFIG_MAC80211_MESH=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_EXTRA_FIRMWARE is not set
-# CONFIG_EXTRA_FIRMWARE_DIR is not set
+# CONFIG_EXTRA_FIRMWARE is not set
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
# CONFIG_MTD_PARTITIONS is not set
-# CONFIG_MTD_TESTS is not set
#
# User Modules And Translation Layers
@@ -520,7 +543,6 @@ CONFIG_MTD_CFI_I2=y
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-# CONFIG_MTD_SHARP_SL is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -528,6 +550,7 @@ CONFIG_MTD_CFI_I2=y
#
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -563,7 +586,21 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-# CONFIG_MISC_DEVICES is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -586,10 +623,7 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_ETHERNET is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
CONFIG_LIBERTAS=m
@@ -598,17 +632,20 @@ CONFIG_LIBERTAS=m
CONFIG_LIBERTAS_SPI=m
# CONFIG_LIBERTAS_DEBUG is not set
# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8187 is not set
# CONFIG_MAC80211_HWSIM is not set
# CONFIG_P54_COMMON is not set
-# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_ATH_COMMON is not set
# CONFIG_HOSTAP is not set
# CONFIG_B43 is not set
# CONFIG_B43LEGACY is not set
# CONFIG_ZD1211RW is not set
# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
+# CONFIG_IWM is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -622,6 +659,7 @@ CONFIG_LIBERTAS_SPI=m
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_CDC_PHONET is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -629,6 +667,7 @@ CONFIG_LIBERTAS_SPI=m
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -647,34 +686,44 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=400
# CONFIG_INPUT_JOYDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
-# CONFIG_INPUT_KEYRESET is not set
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
CONFIG_KEYBOARD_PXA27x=y
-CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
-# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
CONFIG_TOUCHSCREEN_WM97XX=y
@@ -682,10 +731,11 @@ CONFIG_TOUCHSCREEN_WM97XX=y
# CONFIG_TOUCHSCREEN_WM9712 is not set
CONFIG_TOUCHSCREEN_WM9713=y
# CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE is not set
-CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE=y
+CONFIG_TOUCHSCREEN_SGH=y
# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_ATI_REMOTE is not set
# CONFIG_INPUT_ATI_REMOTE2 is not set
@@ -694,8 +744,7 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_CM109 is not set
# CONFIG_INPUT_UINPUT is not set
-# CONFIG_INPUT_GPIO is not set
-# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
#
# Hardware I/O ports
@@ -713,7 +762,6 @@ CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
-CONFIG_DEVMEM=y
# CONFIG_DEVKMEM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
@@ -725,22 +773,23 @@ CONFIG_DEVMEM=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_PXA=y
CONFIG_SERIAL_PXA_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_SGH_MODEM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
-# CONFIG_DCC_TTY is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=y
@@ -752,6 +801,7 @@ CONFIG_I2C_ALGOBIT=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_PXA=y
@@ -775,13 +825,7 @@ CONFIG_I2C_PXA=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
-# CONFIG_SENSORS_PCA963X is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -802,6 +846,11 @@ CONFIG_SPI_PXA2XX=y
#
CONFIG_SPI_SPIDEV=y
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -827,18 +876,23 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
CONFIG_PDA_POWER=y
# CONFIG_BATTERY_DS2760 is not set
-# CONFIG_BATTERY_FAKE_BATTERY is not set
+# CONFIG_BATTERY_DS2782 is not set
# CONFIG_BATTERY_WM97XX is not set
# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
CONFIG_BATTERY_SGH=y
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -850,7 +904,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Multifunction device drivers
#
-# CONFIG_MFD_CORE is not set
+CONFIG_MFD_CORE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
@@ -864,24 +918,14 @@ CONFIG_HTC_PASIC3=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -915,13 +959,17 @@ CONFIG_FB_PXA=y
CONFIG_FB_PXA_OVERLAY=y
CONFIG_FB_PXA_SMARTPANEL=y
CONFIG_FB_PXA_PARAMETERS=y
+CONFIG_FB_PXA_PAN_FIX=y
# CONFIG_FB_MBX is not set
# CONFIG_FB_W100 is not set
+# CONFIG_FB_TMIO is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
# CONFIG_LCD_TDO24M is not set
@@ -965,32 +1013,36 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
-CONFIG_SND_SEQUENCER=y
-# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
# CONFIG_SND_MIXER_OSS is not set
# CONFIG_SND_PCM_OSS is not set
-# CONFIG_SND_SEQUENCER_OSS is not set
-# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
-# CONFIG_SND_DEBUG_VERBOSE is not set
+CONFIG_SND_DEBUG_VERBOSE=y
+CONFIG_SND_PCM_XRUN_DEBUG=y
CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=y
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_ARM=y
-CONFIG_SND_PXA2XX_PCM=m
CONFIG_SND_PXA2XX_LIB=y
CONFIG_SND_PXA2XX_LIB_AC97=y
-CONFIG_SND_PXA2XX_AC97=m
+# CONFIG_SND_PXA2XX_AC97 is not set
# CONFIG_SND_SPI is not set
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
CONFIG_SND_SOC_AC97_BUS=y
CONFIG_SND_PXA2XX_SOC=y
CONFIG_SND_PXA2XX_SOC_AC97=y
+CONFIG_SND_PXA_SOC_SSP=y
CONFIG_SND_SOC_SGH=y
CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_ALL_CODECS is not set
@@ -999,7 +1051,6 @@ CONFIG_SND_SOC_WM9713=y
CONFIG_AC97_BUS=y
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-CONFIG_HID_DEBUG=y
# CONFIG_HIDRAW is not set
#
@@ -1011,7 +1062,6 @@ CONFIG_HID_DEBUG=y
#
# Special HID drivers
#
-CONFIG_HID_COMPAT=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1038,6 +1088,8 @@ CONFIG_USB_MON=m
# CONFIG_USB_C67X00_HCD is not set
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1060,11 +1112,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
# CONFIG_USB_LIBUSUAL is not set
@@ -1092,7 +1144,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1109,20 +1160,16 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
CONFIG_USB_OTG_UTILS=y
CONFIG_USB_GPIO_VBUS=m
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
CONFIG_MMC_UNSAFE_RESUME=y
-# CONFIG_SDIO_FORCE_OPCOND_1_8V is not set
-# CONFIG_SDIO_WORKAROUND_MARVELL_CIS_B1_BUG is not set
-# CONFIG_MMC_EMBEDDED_SDIO is not set
-# CONFIG_MMC_PARANOID_SD_INIT is not set
#
# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
-# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
# CONFIG_SDIO_UART is not set
# CONFIG_MMC_TEST is not set
@@ -1131,9 +1178,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y
#
CONFIG_MMC_PXA=y
CONFIG_MMC_SDHCI=y
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -1142,7 +1191,12 @@ CONFIG_LEDS_CLASS=y
#
# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -1151,9 +1205,13 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-# CONFIG_LEDS_TRIGGER_SLEEP is not set
-# CONFIG_SWITCH is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -1167,7 +1225,6 @@ CONFIG_RTC_INTF_SYSFS=y
CONFIG_RTC_INTF_PROC=y
CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
-CONFIG_RTC_INTF_ALARM=y
# CONFIG_RTC_DRV_TEST is not set
#
@@ -1186,6 +1243,7 @@ CONFIG_RTC_INTF_ALARM=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1197,6 +1255,7 @@ CONFIG_RTC_INTF_ALARM=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1219,17 +1278,18 @@ CONFIG_RTC_INTF_ALARM=y
# CONFIG_RTC_DRV_SA1100 is not set
CONFIG_RTC_DRV_PXA=y
# CONFIG_DMADEVICES is not set
-# CONFIG_REGULATOR is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
CONFIG_STAGING=y
# CONFIG_STAGING_EXCLUDE_BUILD is not set
-# CONFIG_MEILHAUS is not set
# CONFIG_USB_IP_COMMON is not set
# CONFIG_W35UND is not set
# CONFIG_PRISM2_USB is not set
# CONFIG_ECHO is not set
-# CONFIG_USB_ATMEL is not set
-# CONFIG_AGNX is not set
# CONFIG_OTUS is not set
# CONFIG_COMEDI is not set
# CONFIG_ASUS_OLED is not set
@@ -1237,18 +1297,25 @@ CONFIG_STAGING=y
# CONFIG_TRANZPORT is not set
#
-# Android
+# Qualcomm MSM Camera And Video
#
-# CONFIG_ANDROID is not set
-# CONFIG_ANDROID_BINDER_IPC is not set
-# CONFIG_ANDROID_LOGGER is not set
-# CONFIG_ANDROID_RAM_CONSOLE is not set
-# CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE is not set
-# CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION is not set
-# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set
-# CONFIG_ANDROID_TIMED_OUTPUT is not set
-# CONFIG_ANDROID_TIMED_GPIO is not set
-# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
+
+#
+# Camera Sensor Selection
+#
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_DST is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_PLAN9AUTH is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+
+#
+# RAR Register Driver
+#
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_IIO is not set
#
# File systems
@@ -1257,6 +1324,7 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
@@ -1267,10 +1335,13 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1280,6 +1351,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -1315,16 +1391,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-CONFIG_YAFFS_FS=y
-CONFIG_YAFFS_YAFFS1=y
-# CONFIG_YAFFS_9BYTE_TAGS is not set
-# CONFIG_YAFFS_DOES_ECC is not set
-CONFIG_YAFFS_YAFFS2=y
-CONFIG_YAFFS_AUTO_YAFFS2=y
-# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
-# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
-CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
@@ -1393,6 +1459,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1401,12 +1468,16 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
# CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1416,7 +1487,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1426,35 +1496,37 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
-CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_NOP_TRACER=y
+# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_RING_BUFFER=y
-CONFIG_TRACING=y
-
-#
-# Tracers
-#
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-CONFIG_CONTEXT_SWITCH_TRACER=y
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
-# CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
# CONFIG_DEBUG_USER is not set
CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
@@ -1473,7 +1545,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
@@ -1483,10 +1554,12 @@ CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
@@ -1514,11 +1587,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1555,6 +1630,7 @@ CONFIG_CRYPTO_FCRYPT=m
# Compression
#
CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@@ -1562,6 +1638,7 @@ CONFIG_CRYPTO_DEFLATE=m
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
@@ -1575,9 +1652,12 @@ CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
-CONFIG_PLIST=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/recipes/linux/linux-sgh-i900/wm97xx-ts-fix.patch b/recipes/linux/linux-sgh-i900/wm97xx-ts-fix.patch
deleted file mode 100644
index 1a36c337c5..0000000000
--- a/recipes/linux/linux-sgh-i900/wm97xx-ts-fix.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-diff -ru git/drivers/input/touchscreen/wm97xx-core.c and/wm97xx-core.c
---- git/drivers/input/touchscreen/wm97xx-core.c 2009-11-14 20:38:03.000000000 +0200
-+++ git/drivers/input/touchscreen/wm97xx-core.c 2009-11-16 13:21:13.949140354 +0200
-@@ -70,13 +70,11 @@
- * Documentation/input/input-programming.txt for more details.
- */
-
--
--static int abs_x[3] = {350, 3900, 5};
-+static int abs_x[3] = {350, 3900, 5};
- module_param_array(abs_x, int, NULL, 0);
- MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz");
-
--
--static int abs_y[3] = {320, 3950, 5}; // Zylonite: 320, 3950
-+static int abs_y[3] = {320, 3750, 40};
- module_param_array(abs_y, int, NULL, 0);
- MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz");
-
-@@ -411,7 +409,6 @@
- wm->pen_is_down = 0;
- dev_dbg(wm->dev, "pen up\n");
- input_report_abs(wm->input_dev, ABS_PRESSURE, 0);
-- input_report_key(wm->input_dev, BTN_TOUCH, 0);
- input_sync(wm->input_dev);
- } else if (!(rc & RC_AGAIN)) {
- /* We need high frequency updates only while
-@@ -429,22 +426,13 @@
- }
-
- } else if (rc & RC_VALID) {
-- int absy, absx;
- dev_dbg(wm->dev,
- "pen down: x=%x:%d, y=%x:%d, pressure=%x:%d\n",
- data.x >> 12, data.x & 0xfff, data.y >> 12,
- data.y & 0xfff, data.p >> 12, data.p & 0xfff);
-- absx = data.x & 0xfff;
-- if (machine_is_sgh_i780())
-- absx = (wm->input_dev->absmax[ABS_X] - absx) + wm->input_dev->absmin[ABS_X];
-- input_report_abs(wm->input_dev, ABS_X, absx);
-- //invert y coordinate
-- absy = data.y & 0xfff;
-- if (machine_is_sgh_i900())
-- absy = (wm->input_dev->absmax[ABS_Y] - absy) + wm->input_dev->absmin[ABS_Y];
-- input_report_abs(wm->input_dev, ABS_Y, absy);
-+ input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff);
-+ input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff);
- input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff);
-- input_report_key(wm->input_dev, BTN_TOUCH, 1);
- input_sync(wm->input_dev);
- wm->pen_is_down = 1;
- wm->ts_reader_interval = wm->ts_reader_min_interval;
-@@ -641,23 +629,12 @@
- wm->input_dev->open = wm97xx_ts_input_open;
- wm->input_dev->close = wm97xx_ts_input_close;
- set_bit(EV_ABS, wm->input_dev->evbit);
-- set_bit(EV_KEY, wm->input_dev->evbit);
- set_bit(ABS_X, wm->input_dev->absbit);
- set_bit(ABS_Y, wm->input_dev->absbit);
- set_bit(ABS_PRESSURE, wm->input_dev->absbit);
-- set_bit(BTN_TOUCH, wm->input_dev->keybit);
--
-- if(machine_is_sgh_i780()){
-- input_set_abs_params(wm->input_dev, ABS_X, 350, 3900, 5, 0);
-- } else if(machine_is_sgh_i900()){
-- input_set_abs_params(wm->input_dev, ABS_X, 0, 39181660, 5, 0);
-- } else input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
-+ input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
- abs_x[2], 0);
-- if(machine_is_sgh_i780()){
-- input_set_abs_params(wm->input_dev, ABS_Y, 290, 3900, 5, 0);
-- } else if(machine_is_sgh_i900()){
-- input_set_abs_params(wm->input_dev, ABS_Y, 0, 65412060, 5, 0);
-- } else input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
-+ input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
- abs_y[2], 0);
- input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
- abs_p[2], 0);
-diff -ru git/drivers/input/touchscreen/zylonite-wm97xx.c and/zylonite-wm97xx.c
---- git/drivers/input/touchscreen/zylonite-wm97xx.c 2009-11-14 20:38:03.000000000 +0200
-+++ git/drivers/input/touchscreen/zylonite-wm97xx.c 2009-11-16 13:17:21.292645713 +0200
-@@ -76,9 +76,6 @@
- module_param(ac97_touch_slot, int, 0);
- MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
-
--static int calibration[7] = {11877, 137, -4688902, 231, -17973, 69206765, 163940}; //omnia calibration parameters
--
--
- /* flush AC97 slot 5 FIFO machines */
- static void wm97xx_acc_pen_up(struct wm97xx *wm)
- {
-@@ -101,7 +98,6 @@
- {
- u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
- int reads = 0;
-- int absx, absy;
- static u16 last, tries;
- static int skip_one;
-
-@@ -149,27 +145,9 @@
-
- /* coordinate is good */
- tries = 0;
-- if(machine_is_sgh_i900()){
-- x &= 0xfff;
-- y &= 0xfff;
-- absx = (calibration[0] * x + calibration[1] * y +
-- calibration[2]);// / calibration[6];
-- absy = (calibration[3] * x + calibration[4] * y +
-- calibration[5]);// / calibration[6];
-- if(absx<0) absx = 0;
-- if(absy<0) absy = 0;
-- } else {
-- absx = x & 0xfff;
-- if (machine_is_sgh_i780())
-- absx = (wm->input_dev->absmax[ABS_X] - absx) + wm->input_dev->absmin[ABS_X];
--
-- absy = y & 0xfff;
-- }
--
-- input_report_abs(wm->input_dev, ABS_X, absx);
-- input_report_abs(wm->input_dev, ABS_Y, absy);
-- p &= 0xfff;
-- input_report_abs(wm->input_dev, ABS_PRESSURE, p);
-+ input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
-+ input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
-+ input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
- input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
- input_sync(wm->input_dev);
- reads++;
diff --git a/recipes/linux/linux-sgh-i900_2.6.29.bb b/recipes/linux/linux-sgh-i900_2.6.32.bb
index 5bd2f4f0f3..4b5df5b7aa 100644
--- a/recipes/linux/linux-sgh-i900_2.6.29.bb
+++ b/recipes/linux/linux-sgh-i900_2.6.32.bb
@@ -1,4 +1,4 @@
-DESCRIPTION = "Linux 2.6.29 kernel for the Samsung Omnia SGH-i900."
+DESCRIPTION = "Linux 2.6.32 kernel for the Samsung Omnia SGH-i900."
SECTION = "kernel"
LICENSE = "GPL"
@@ -6,11 +6,11 @@ RDEPENDS += "marvell-gspi-fw"
COMPATIBLE_MACHINE = "sgh-i900"
-SRC_URI = "git://andromnia.git.sourceforge.net/gitroot/andromnia/andromnia;protocol=git;branch=master \
- file://wm97xx-ts-fix.patch;patch=1 \
+SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ file://sgh-i900-support.patch;patch=1 \
file://sgh_i900_defconfig"
-S = "${WORKDIR}/git"
+S = "${WORKDIR}/linux-${PV}"
inherit kernel
@@ -18,4 +18,4 @@ FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
do_configure_prepend() {
install -m 0644 ${WORKDIR}/sgh_i900_defconfig ${S}/.config
-}
+}
\ No newline at end of file
diff --git a/recipes/linux/linux_2.6.18.bb b/recipes/linux/linux_2.6.18.bb
index b445325f2d..df7641565a 100644
--- a/recipes/linux/linux_2.6.18.bb
+++ b/recipes/linux/linux_2.6.18.bb
@@ -6,11 +6,12 @@ require linux.inc
DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_avr32 = "1"
-PR = "r0"
+PR = "r1"
PARALLEL_MAKE=""
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.20.bb b/recipes/linux/linux_2.6.20.bb
index d8427bfc6f..d46c623e6b 100644
--- a/recipes/linux/linux_2.6.20.bb
+++ b/recipes/linux/linux_2.6.20.bb
@@ -9,6 +9,8 @@ DEFAULT_PREFERENCE_nhk15 = "1"
PR = "r10"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.21.bz2;patch=1 \
+ file://0001-kbuild-include-limits.h-in-sumversion.c-for-PATH_MAX.patch;patch=1 \
file://defconfig"
SRC_URI_append_n2100 = "\
diff --git a/recipes/linux/linux_2.6.21+2.6.22-rc1.bb b/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
index 35b2efe74a..563820fec7 100644
--- a/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
+++ b/recipes/linux/linux_2.6.21+2.6.22-rc1.bb
@@ -9,7 +9,7 @@ KERNEL_RELEASE = "2.6.22-rc1"
PR = "r2"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.21.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.22-rc1.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.22/patch-2.6.22-rc1.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.21.bb b/recipes/linux/linux_2.6.21.bb
index c4c7297f1c..bbac0c2ae9 100644
--- a/recipes/linux/linux_2.6.21.bb
+++ b/recipes/linux/linux_2.6.21.bb
@@ -5,9 +5,10 @@ DEFAULT_PREFERENCE_at91sam9263ek = "-1"
DEFAULT_PREFERENCE_gumstix-connex = "1"
DEFAULT_PREFERENCE_gumstix-verdex = "1"
-PR = "r12"
+PR = "r13"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.7.bz2;patch=1 \
file://tsc2003.c \
file://tsc2003-config.diff;patch=1 \
file://defconfig \
diff --git a/recipes/linux/linux_2.6.22+2.6.23-rc3.bb b/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
index 47c3564087..e52800a8f4 100644
--- a/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
+++ b/recipes/linux/linux_2.6.22+2.6.23-rc3.bb
@@ -10,7 +10,7 @@ KERNEL_RELEASE = "2.6.23-rc3"
PR = "r1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${BASE_KERNEL_VERSION}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_VERSION}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.23/patch-${KERNEL_VERSION}.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.22+2.6.23-rc5.bb b/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
index 3165c39a11..e21855269f 100644
--- a/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
+++ b/recipes/linux/linux_2.6.22+2.6.23-rc5.bb
@@ -10,7 +10,7 @@ KERNEL_VERSION = "2.6.23-rc5"
KERNEL_RELEASE = "2.6.23-rc5"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${BASE_KERNEL_VERSION}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_VERSION}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.23/patch-${KERNEL_VERSION}.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.22.6.bb b/recipes/linux/linux_2.6.22.6.bb
deleted file mode 100644
index 69b988e332..0000000000
--- a/recipes/linux/linux_2.6.22.6.bb
+++ /dev/null
@@ -1,31 +0,0 @@
-require linux.inc
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_ts72xx = "1"
-DEFAULT_PREFERENCE_mx31moboard = "1"
-
-PR = "r1"
-
-SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.22.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.bz2;patch=1 \
- file://defconfig \
- "
-
-SRC_URI_append_ts72xx = "\
- file://ep93xx-gpio-interrupt-debounce.diff;patch=1 \
- file://ep93xx-i2c-bus.diff;patch=1 \
- file://ep93xx-i2c.diff;patch=1 \
- file://ep93xx-leds.diff;patch=1 \
- file://ep93xx-serial-uartbaud.diff;patch=1 \
- file://ep93xx-serial-clocks.diff;patch=1 \
- file://ep93xx-timer-accuracy.diff;patch=1 \
- file://ep93xx-maverick-uniqid.patch;patch=1 \
- file://ts72xx-nfbit-fix.patch;patch=1 \
- file://ts72xx-machine-id-fix.patch;patch=1 \
- file://ts72xx-watchdog.patch;patch=1 \
- file://ts72xx-use-cpld-reset.patch;patch=1 \
- "
-
-SRC_URI_append_mx31moboard = "http://mobots.epfl.ch/mx31moboard/linux-2.6.22-moboard.patch.bz2;patch=1"
-
-S = "${WORKDIR}/linux-2.6.22"
diff --git a/recipes/linux/linux_2.6.22.bb b/recipes/linux/linux_2.6.22.bb
index bb8749f0d6..1e73139c5e 100644
--- a/recipes/linux/linux_2.6.22.bb
+++ b/recipes/linux/linux_2.6.22.bb
@@ -4,10 +4,12 @@ require linux.inc
DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_cm-x270 = "-1"
DEFAULT_PREFERENCE_bd-neon = "0"
+DEFAULT_PREFERENCE_mx31moboard = "1"
-PR = "r5"
+PR = "r6"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.22.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.19.bz2;patch=1 \
file://defconfig \
"
@@ -21,6 +23,8 @@ SRC_URI_append_cm-x270 = "\
file://0007-mmcsd_large_cards-r0.patch;patch=1 \
file://0008-cm-x270-nand-simplify-name.patch;patch=1"
+SRC_URI_append_mx31moboard = "http://mobots.epfl.ch/mx31moboard/linux-2.6.22-moboard.patch.bz2;patch=1"
+
CMDLINE_cm-x270 = "console=${CMX270_CONSOLE_SERIAL_PORT},38400 monitor=8 bpp=16 mem=64M mtdparts=physmap-flash.0:256k(boot)ro,0x180000(kernel),-(root);cm-x270-nand:64m(app),-(data) rdinit=/sbin/init root=mtd3 rootfstype=jffs2"
FILES_kernel-image_cm-x270 = ""
diff --git a/recipes/linux/linux_2.6.23+2.6.24-rc5.bb b/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
index d22d5e8f27..32a3ac0a70 100644
--- a/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
+++ b/recipes/linux/linux_2.6.23+2.6.24-rc5.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE = "-1"
PR = "r3"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.24-rc5.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.24/patch-2.6.24-rc5.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.23+2.6.24-rc6.bb b/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
index 73100b62c3..035e6054f6 100644
--- a/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
+++ b/recipes/linux/linux_2.6.23+2.6.24-rc6.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE = "-1"
PR = "r0"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-2.6.24-rc6.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.24/patch-2.6.24-rc6.bz2;patch=1 \
file://defconfig \
"
diff --git a/recipes/linux/linux_2.6.23.bb b/recipes/linux/linux_2.6.23.bb
index 0f62a1c37c..65f400d6b6 100644
--- a/recipes/linux/linux_2.6.23.bb
+++ b/recipes/linux/linux_2.6.23.bb
@@ -6,7 +6,7 @@ DEFAULT_PREFERENCE_mpc8313e-rdb = "1"
DEFAULT_PREFERENCE_mpc8323e-rdb = "1"
DEFAULT_PREFERENCE_avr32 = "1"
-PR = "r12"
+PR = "r13"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
file://binutils-buildid-arm.patch;patch=1 \
@@ -15,22 +15,23 @@ SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
"
# Bug fixes on the 2.6.23.x stable branch
-SRC_URI += "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.23.12.bz2;patch=1"
+SRC_URI += "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.23.17.bz2;patch=1"
# Real-time preemption (includes CFS). This is experimental and requires a different defconfig.
#SRC_URI += "file://patch-2.6.23.12-rt14;patch=1"
-# Only the Completely Fair Scheduler (CFS), the official backport from 2.6.24
-SRC_URI += "http://people.redhat.com/mingo/cfs-scheduler/sched-cfs-v2.6.23.12-v24.1.patch;patch=1"
+# Only the Completely Fair Scheduler (CFS), the official backport from 2.6.24 (adapted for 2.6.23.17)
+SRC_URI += "file://sched-cfs-v2.6.23.12-v24.1.patch;patch=1"
# Add support for squashfs-lzma (a highly compressed read-only filesystem)
SRC_URI += "http://kamikaze.waninkoko.info/patches/2.6.23/klight1/broken-out/squashfs-lzma-2.6.23.patch;patch=1"
+SRC_URI += "file://time.h.patch;patch=1"
+
# The Atmel patch doesn't apply against 2.6.23.12 :(
SRC_URI_avr32 = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.23.tar.bz2 \
file://defconfig \
http://avr32linux.org/twiki/pub/Main/LinuxPatches/linux-2.6.23.atmel.3.patch.bz2;patch=1 \
"
SRC_URI_append_em-x270 = "\
- file://em-x270.patch;patch=1 \
- file://01-prevent_loop_timespec_add_ns.patch;patch=1"
+ file://em-x270.patch;patch=1 "
SRC_URI_append_cm-x270 = "\
file://0001-cm-x270-base2.patch;patch=1 \
diff --git a/recipes/linux/linux_2.6.24.bb b/recipes/linux/linux_2.6.24.bb
index cb4389a702..c4549795d6 100644
--- a/recipes/linux/linux_2.6.24.bb
+++ b/recipes/linux/linux_2.6.24.bb
@@ -13,12 +13,12 @@ DEFAULT_PREFERENCE_hipox = "1"
DEFAULT_PREFERENCE_cs-e9302 = "1"
DEFAULT_PREFERENCE_smartq5 = "1"
-PR = "r33"
+PR = "r34"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.7.bz2;patch=1 \
file://squashfs-lzma-2.6.24.patch;patch=1 \
file://ubifs-v2.6.24.patch;patch=1 \
- file://time.h.patch;patch=1 \
file://defconfig"
# Moved away temporarely until committed properly (work in progress).
diff --git a/recipes/linux/linux_2.6.25.20.bb b/recipes/linux/linux_2.6.25.20.bb
deleted file mode 100644
index 6440e0650b..0000000000
--- a/recipes/linux/linux_2.6.25.20.bb
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################
-#@MAINTAINER: Marco Cavallini <m.cavallini@koansoftware.com>
-# linux_2.6.25.20.bb
-# recipe file for PM9261 and PM9263
-###########################################
-
-require linux.inc
-
-PR = "r2"
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_ronetix-pm9263 = "1"
-DEFAULT_PREFERENCE_ronetix-pm9261 = "1"
-
-SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.25.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.25.20.bz2;patch=1 \
- file://defconfig"
-
-# WARNING: for following patched is required the proper entry in conf/checksums.ini
-
-SRC_URI_append_ronetix-pm9263 = " \
- http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
- "
-
-SRC_URI_append_ronetix-pm9261 = " \
- http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
- http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
- "
-
-S = "${WORKDIR}/linux-2.6.25/"
diff --git a/recipes/linux/linux_2.6.25.bb b/recipes/linux/linux_2.6.25.bb
index b6ed3d82e2..a89374a652 100644
--- a/recipes/linux/linux_2.6.25.bb
+++ b/recipes/linux/linux_2.6.25.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r6"
+PR = "r8"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -13,8 +13,11 @@ DEFAULT_PREFERENCE_alix = "1"
DEFAULT_PREFERENCE_at32stk1000 = "1"
DEFAULT_PREFERENCE_at91-l9260 = "1"
DEFAULT_PREFERENCE_m8050 = "1"
+DEFAULT_PREFERENCE_ronetix-pm9263 = "1"
+DEFAULT_PREFERENCE_ronetix-pm9261 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.25.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.20.bz2;patch=1 \
file://defconfig"
SRC_URI_append_mpc8313e-rdb = "\
@@ -41,6 +44,18 @@ SRC_URI_append_at91-l9260 = " \
http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
"
+SRC_URI_append_ronetix-pm9263 = " \
+ http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
+"
+
+SRC_URI_append_ronetix-pm9261 = " \
+ http://maxim.org.za/AT91RM9200/2.6/2.6.25-at91.patch.gz;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/linux-2.6.25.4-ronetix-08-11-02.2228.patch;patch=1 \
+ http://download.ronetix.info/sk-eb926x/linux/kernel/2.6.25.4/socketcan-driver-at91.patch;patch=1 \
+"
+
SRC_URI_append_m8050 = " file://m8050.diff;patch=1 file://update-mach-types.diff;patch=1"
CMDLINE_cm-x270 = "console=${CMX270_CONSOLE_SERIAL_PORT},38400 monitor=1 mem=64M mtdparts=physmap-flash.0:256k(boot)ro,0x180000(kernel),-(root);cm-x270-nand:64m(app),-(data) rdinit=/sbin/init root=mtd3 rootfstype=jffs2"
diff --git a/recipes/linux/linux_2.6.26.bb b/recipes/linux/linux_2.6.26.bb
index 53d55577b0..775d2d8ba1 100644
--- a/recipes/linux/linux_2.6.26.bb
+++ b/recipes/linux/linux_2.6.26.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r9"
+PR = "r10"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -11,6 +11,7 @@ DEFAULT_PREFERENCE_topas910 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.26.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig"
SRC_URI_append_boc01 = "\
@@ -36,8 +37,7 @@ SRC_URI_append_mpc8313e-rdb = "\
file://mpc8313e-rdb-eth-fixed.patch;patch=1 \
"
-SRC_URI_append_topas910 = " ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-2.6.26.5.bz2;patch=1 \
- http://www.bplan-gmbh.org/data/toshiba/topas/linux/2.6.26.5/patch_2.6.26.5_topas910.bz2;patch=1"
+SRC_URI_append_topas910 = "http://www.bplan-gmbh.org/data/toshiba/topas/linux/2.6.26.5/patch_2.6.26.5_topas910.bz2;patch=1"
# see http://bugzilla.kernel.org/show_bug.cgi?id=11143
do_stage_append() {
diff --git a/recipes/linux/linux_2.6.27.bb b/recipes/linux/linux_2.6.27.bb
index 0b873a65a7..9e8d6a32e0 100644
--- a/recipes/linux/linux_2.6.27.bb
+++ b/recipes/linux/linux_2.6.27.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r8"
+PR = "r11"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -9,6 +9,7 @@ DEFAULT_PREFERENCE_progear = "1"
DEFAULT_PREFERENCE_simpad = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.41.bz2;patch=1 \
file://defconfig "
SRC_URI_append_boc01 = "\
diff --git a/recipes/linux/linux_2.6.28.bb b/recipes/linux/linux_2.6.28.bb
index 98cf19129d..05fe815d98 100644
--- a/recipes/linux/linux_2.6.28.bb
+++ b/recipes/linux/linux_2.6.28.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r12"
+PR = "r13"
# Mark archs/machines that this kernel supports
DEFAULT_PREFERENCE = "-1"
@@ -14,6 +14,7 @@ DEFAULT_PREFERENCE_wrap = "1"
DEFAULT_PREFERENCE_tx27 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.28.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.10.bz2;patch=1 \
file://defconfig"
SRC_URI_append_at91sam9263ek = " \
diff --git a/recipes/linux/linux_2.6.29+2.6.30-rc5.bb b/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
index 2a38a546e0..08cb2fe3c5 100644
--- a/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
+++ b/recipes/linux/linux_2.6.29+2.6.30-rc5.bb
@@ -19,6 +19,6 @@ DEFAULT_PREFERENCE_spitz = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${OLD_KERNEL_RELEASE}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/patch-${KERNEL_RELEASE}.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/v2.6.30/patch-${KERNEL_RELEASE}.bz2;patch=1 \
file://defconfig"
diff --git a/recipes/linux/linux_2.6.29.bb b/recipes/linux/linux_2.6.29.bb
index f78193d711..240faf224d 100644
--- a/recipes/linux/linux_2.6.29.bb
+++ b/recipes/linux/linux_2.6.29.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r9"
+PR = "r10"
S = "${WORKDIR}/linux-2.6.29"
@@ -19,6 +19,7 @@ DEFAULT_PREFERENCE_tqm8540 = "1"
DEFAULT_PREFERENCE_stamp9g20evb = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.29.tar.bz2 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.6.bz2;patch=1 \
file://defconfig"
SRC_URI_append_boc01 = "\
@@ -42,14 +43,12 @@ SRC_URI_append_micro2440 = " \
file://0002-S3C-Backported-openmoko-s-touchscreen-filters.patch;patch=1 \
file://0003-VENDOR-armworks-logo.patch;patch=1 \
file://0004-920T-Use-specific-920t-mtune.patch;patch=1 \
- file://0005-920T-Temp-fix-for-the-40-relocation-binutils-pro.patch;patch=1 \
file://0006-S3C-Allow-the-machine-code-to-get-the-BBT-table-fro.patch;patch=1 \
file://0007-MINI2440-Add-machine-support.patch;patch=1 \
file://0008-MINI2440-Delays-command-check-response-on-SD.patch;patch=1 \
file://0009-MINI2440-Rename-the-SoC-tty-names.patch;patch=1 \
file://0010-MINI2440-creates-a-mini2440_defconfig-file.patch;patch=1 \
file://0011-MINI2440-Add-touchscreen-support.patch;patch=1 \
- file://0012-GRO-Disable-GRO-on-legacy-netif_rx-path.patch;patch=1 \
"
SRC_URI_append_tosa = " \
diff --git a/recipes/linux/linux_2.6.30.bb b/recipes/linux/linux_2.6.30.bb
index c30caa4b36..19247e2e5b 100644
--- a/recipes/linux/linux_2.6.30.bb
+++ b/recipes/linux/linux_2.6.30.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r4"
+PR = "r5"
S = "${WORKDIR}/linux-${PV}"
@@ -16,7 +16,7 @@ DEFAULT_PREFERENCE_at91sam9263ek = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.9.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.10.bz2;patch=1 \
http://maxim.org.za/AT91RM9200/2.6/2.6.30-at91.patch.gz;patch=1 \
file://aufs2-30.patch;patch=1 \
file://defconfig"
diff --git a/recipes/linux/linux_2.6.31.bb b/recipes/linux/linux_2.6.31.bb
index 839e808451..dcce779ebd 100644
--- a/recipes/linux/linux_2.6.31.bb
+++ b/recipes/linux/linux_2.6.31.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r6"
+PR = "r8"
S = "${WORKDIR}/linux-${PV}"
@@ -16,7 +16,7 @@ DEFAULT_PREFERENCE_ben-nanonote = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
- ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.5.bz2;patch=1 \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.8.bz2;patch=1 \
file://defconfig"
SRC_URI += "file://0001-Squashfs-move-zlib-decompression-wrapper-code-into.patch;patch=1 \
diff --git a/recipes/linux/linux_2.6.32.bb b/recipes/linux/linux_2.6.32.bb
index 99db01bf8b..be3d8ad7c6 100644
--- a/recipes/linux/linux_2.6.32.bb
+++ b/recipes/linux/linux_2.6.32.bb
@@ -1,6 +1,6 @@
require linux.inc
-PR = "r1"
+PR = "r2"
S = "${WORKDIR}/linux-${PV}"
@@ -15,6 +15,7 @@ DEFAULT_PREFERENCE_spitz = "-1"
DEFAULT_PREFERENCE_tosa = "-1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2;name=kernel \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.1.bz2;patch=1 \
file://defconfig"
SRC_URI[kernel.md5sum] = "260551284ac224c3a43c4adac7df4879"
diff --git a/recipes/openmoko-3rdparty/guitartune_svn.bb b/recipes/openmoko-3rdparty/guitartune_svn.bb
new file mode 100644
index 0000000000..37fbd5ffa3
--- /dev/null
+++ b/recipes/openmoko-3rdparty/guitartune_svn.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "guitar tuner for openmoko phones"
+HOMEPAGE = "http://code.google.com/p/guitartune"
+AUTHOR = "cchandel"
+LICENSE = "GPLv2"
+SECTION = "e/apps"
+DEPENDS = "gtk+ libglade fftw sqlite3"
+
+PV = "0.36+svnr${SRCPV}"
+
+SRC_URI = "svn://guitartune.googlecode.com/svn;module=trunk;proto=http"
+S = "${WORKDIR}/trunk"
+
+inherit autotools
+
+do_install_append() {
+ install -d "${D}/${datadir}/pixmaps"
+ install -m 0644 "${S}/resources/guitartune.png" "${D}/${datadir}/pixmaps"
+ install -d "${D}/${datadir}/applications"
+ install -m 0644 "${S}/resources/guitartune.desktop" "${D}/${datadir}/applications"
+ install -d "${D}/${datadir}/guitartune"
+ for ico in "${S}/resources/"*.png; do
+ if [ "$(basename $ico)" != "guitartune.png" ]; then
+ install -m 0644 $ico "${D}/${datadir}/guitartune"
+ fi
+ done
+}
+
+FILES_${PN} += "/usr/share/guitartune/* /usr/share/applications/* /usr/share/pixmaps/*"
diff --git a/recipes/powervr-drivers/libgles-omap3.inc b/recipes/powervr-drivers/libgles-omap3.inc
index ddec910efe..360cb7790e 100644
--- a/recipes/powervr-drivers/libgles-omap3.inc
+++ b/recipes/powervr-drivers/libgles-omap3.inc
@@ -1,6 +1,7 @@
DESCRIPTION = "libGLES for the omap3"
LICENCE = "proprietary-binary"
+COMPATIBLE_MACHINE = "(am3517-evm|beagleboard|cm-t35|igep0020|omap3-pandora|omap3-touchbook|omap3evm|omapzoom|omapzoom2|overo|palmpre)"
RDEPENDS_${PN} += "devmem2"
#HACK! These are binaries, so we can't guarantee that LDFLAGS match :(
diff --git a/recipes/qt4/qt-4.6.0.inc b/recipes/qt4/qt-4.6.0.inc
index 97ad6cdd74..b29b708d74 100644
--- a/recipes/qt4/qt-4.6.0.inc
+++ b/recipes/qt4/qt-4.6.0.inc
@@ -1,7 +1,5 @@
DEFAULT_PREFERENCE = "-1"
-require qt4-embedded.inc
-
SRC_URI = "ftp://ftp.trolltech.com/qt/source/qt-everywhere-opensource-src-${PV}.tar.gz \
file://0001-cross-compile.patch;patch=1 \
file://0002-fix-resinit-declaration.patch;patch=1 \
diff --git a/recipes/qt4/qt4-embedded-gles_4.6.0.bb b/recipes/qt4/qt4-embedded-gles_4.6.0.bb
index f988f9828b..f819c66212 100644
--- a/recipes/qt4/qt4-embedded-gles_4.6.0.bb
+++ b/recipes/qt4/qt4-embedded-gles_4.6.0.bb
@@ -1,4 +1,4 @@
-
+require qt4-embedded.inc
PR = "${INC_PR}.0"
QT_GLFLAGS = "-opengl es2 -openvg"
diff --git a/recipes/qt4/qt4-embedded_4.6.0.bb b/recipes/qt4/qt4-embedded_4.6.0.bb
index 3bde65014d..58fda71ad1 100644
--- a/recipes/qt4/qt4-embedded_4.6.0.bb
+++ b/recipes/qt4/qt4-embedded_4.6.0.bb
@@ -1,3 +1,4 @@
+require qt4-embedded.inc
PR = "${INC_PR}.0"
diff --git a/recipes/qt4/qt4-x11-free-gles_4.6.0.bb b/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
index 36b6ec5a25..fcb935b8c7 100644
--- a/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
+++ b/recipes/qt4/qt4-x11-free-gles_4.6.0.bb
@@ -1,4 +1,4 @@
-
+require qt4-x11-free.inc
PR = "${INC_PR}.0"
QT_GLFLAGS = "-opengl es2 -openvg"
diff --git a/recipes/qt4/qt4-x11-free_4.6.0.bb b/recipes/qt4/qt4-x11-free_4.6.0.bb
index 1b5b4524c8..80a25303bb 100644
--- a/recipes/qt4/qt4-x11-free_4.6.0.bb
+++ b/recipes/qt4/qt4-x11-free_4.6.0.bb
@@ -1,3 +1,4 @@
+require qt4-x11-free.inc
PR = "${INC_PR}.0"
require qt-4.6.0.inc
diff --git a/recipes/qt4/qt4.inc b/recipes/qt4/qt4.inc
index 7d600a825c..0671a6c570 100644
--- a/recipes/qt4/qt4.inc
+++ b/recipes/qt4/qt4.inc
@@ -71,6 +71,7 @@ python __anonymous () {
}
OTHER_PACKAGES = "\
+ ${QT_BASE_NAME}-tools \
${QT_BASE_NAME}-assistant \
${QT_BASE_NAME}-common \
${QT_BASE_NAME}-dbus \
@@ -95,6 +96,7 @@ RRECOMMENDS_${PN} = "${LIB_PACKAGES} ${OTHER_PACKAGES}"
RRECOMMENDS_${PN}-dev = "${DEV_PACKAGES}"
RRECOMMENDS_${PN}-dbg = "${DBG_PACKAGES}"
+FILES_${QT_BASE_NAME}-tools = "${bindir}/uic* ${bindir}/moc ${bindir}/rcc ${bindir}/qttracereplay"
FILES_${QT_BASE_NAME}-assistant = "${bindir}/*assistant* ${bindir}/qcollectiongenerator ${bindir}/qhelpconverter ${bindir}/qhelpgenerator"
FILES_${QT_BASE_NAME}-assistant-dbg = "${bindir}/.debug/*assistant* ${bindir}/.debug/qcollectiongenerator ${bindir}/.debug/qhelpconverter ${bindir}/.debug/qhelpgenerator"
FILES_${QT_BASE_NAME}-common = "${bindir}/qtconfig"
@@ -257,58 +259,4 @@ do_install() {
touch ${D}/${libdir}/fonts/fontdir
}
-STAGE_TEMP = "${WORKDIR}/temp-staging"
-do_stage() {
- rm -rf ${STAGE_TEMP}
- mkdir -p ${STAGE_TEMP}
- oe_runmake install INSTALL_ROOT=${STAGE_TEMP}
-
- # fix pkgconfig, libtool and prl files
- sed -i -e s#-L${S}/lib##g \
- -e s#-L${STAGING_LIBDIR}##g \
- -e s#-L${libdir}##g \
- -e s#'$(OE_QMAKE_LIBS_X11)'#"${OE_QMAKE_LIBS_X11}"#g \
- ${STAGE_TEMP}${libdir}/*.la ${STAGE_TEMP}${libdir}/*.prl ${STAGE_TEMP}${libdir}/pkgconfig/*.pc
-
- # fix pkgconfig files
- sed -i -e s#"moc_location=.*$"## \
- -e s#"uic_location=.*$"## \
- ${STAGE_TEMP}/${libdir}/pkgconfig/*.pc
- for name in ${QT_LIB_NAMES}; do
- sed -i -e "/Requires/s#${name}#${name}${QT_LIBINFIX}#"g ${D}${libdir}/pkgconfig/*.pc
- done
-
- # fix libtool files
- sed -i -e s#installed=yes#installed=no#g ${STAGE_TEMP}/${libdir}/*.la
-
- # install headers
- install -d ${STAGING_INCDIR}
- cp -pPRf ${STAGE_TEMP}/${includedir}/* ${STAGING_INCDIR}/
-
- # install libraries
- install -d ${STAGING_LIBDIR}
- for i in ${STAGE_TEMP}/${libdir}/*.prl; do
- cp -pPRf $i ${STAGING_LIBDIR}
- cp -pPRf ${STAGE_TEMP}/${libdir}/$(basename $i .prl).la ${STAGING_LIBDIR} || true
- oe_libinstall -C ${STAGE_TEMP}/${libdir} -a $(basename $i .prl) ${STAGING_LIBDIR} || true
- oe_libinstall -C ${STAGE_TEMP}/${libdir} -so $(basename $i .prl) ${STAGING_LIBDIR} || true
- done
-
- # install pkgconfig files
- install -d ${STAGING_LIBDIR}/pkgconfig
- cp -pPRf ${STAGE_TEMP}/${libdir}/pkgconfig/*.pc ${STAGING_LIBDIR}/pkgconfig/
-
- # install mkspecs
- install -d ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs
- cp -pPRf ${STAGE_TEMP}/${datadir}/${QT_DIR_NAME}/mkspecs/* ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/
-
- rm -rf ${STAGE_TEMP}
-
- # FIXME: install symlinks to tools?
- #install -d ${STAGING_DATADIR}/${QT_DIR_NAME}/bin
- #ln -sf ${STAGING_BINDIR_NATIVE}/qmake2 ${STAGING_DATADIR}/${QT_DIR_NAME}/bin/qmake
- #for qttool in moc uic uic3 rcc lrelease lupdate; do
- # ln -sf ${STAGING_BINDIR_NATIVE}/${qttool}4 ${STAGING_DATADIR}/${QT_DIR_NAME}/bin/${qttool}
- #done
-}
diff --git a/recipes/qt4/wolfenqt-e_git.bb b/recipes/qt4/wolfenqt-e_git.bb
new file mode 100644
index 0000000000..661838842a
--- /dev/null
+++ b/recipes/qt4/wolfenqt-e_git.bb
@@ -0,0 +1,5 @@
+WOLFVARIANT = "e"
+require wolfenqt.inc
+
+
+
diff --git a/recipes/qt4/wolfenqt.inc b/recipes/qt4/wolfenqt.inc
new file mode 100644
index 0000000000..e1e08cf6ab
--- /dev/null
+++ b/recipes/qt4/wolfenqt.inc
@@ -0,0 +1,19 @@
+
+PE = "1"
+PV = "0.0"
+PR_append = "+gitr${SRCREV}"
+
+inherit qt4${WOLFVARIANT}
+
+SRCREV = "f43dfa2bfa1f72abd3500dfc94248b17c5f9ae05"
+SRC_URI = "git://gitorious.org/qt-labs/wolfenqt.git;protocol=git"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 qt3d ${D}${bindir}/wolfenqt-${WOLFVARIANT}
+}
+
+
+
diff --git a/recipes/qt4/wolfenqt_git.bb b/recipes/qt4/wolfenqt_git.bb
new file mode 100644
index 0000000000..2d762f3dc2
--- /dev/null
+++ b/recipes/qt4/wolfenqt_git.bb
@@ -0,0 +1,5 @@
+WOLFVARIANT = "x11"
+require wolfenqt.inc
+
+
+
diff --git a/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh b/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh
new file mode 100644
index 0000000000..d7554954d6
--- /dev/null
+++ b/recipes/shr/initscripts-shr/palmpre/usb-gadget.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# usb gadget configuration:
+# there are already several configurations defined on the Palm Pre;
+# we choose number 5 because it provides the usbnet, novacom
+# and storage gadgets
+if [ -e /sys/class/usb_gadget/config_num ]; then
+ echo 5 > /sys/class/usb_gadget/config_num
+fi
+
diff --git a/recipes/shr/initscripts-shr_0.0.1.bb b/recipes/shr/initscripts-shr_0.0.1.bb
index f1be1c2fc7..89df6ce375 100644
--- a/recipes/shr/initscripts-shr_0.0.1.bb
+++ b/recipes/shr/initscripts-shr_0.0.1.bb
@@ -5,7 +5,7 @@ DEPENDS = ""
RDEPENDS = ""
LICENSE = "GPL"
PV = "0.0.1"
-PR = "r13"
+PR = "r14"
RPROVIDES_${PN} = "initscripts"
RCONFLICTS_${PN} = "initscripts"
@@ -33,6 +33,7 @@ SRC_URI = "file://alignment.sh \
file://umountfs \
file://umountnfs.sh \
"
+SRC_URI_append_palmpre = " file://usb-gadget.sh"
inherit base
@@ -76,6 +77,11 @@ do_install () {
install -m 0755 ${WORKDIR}/umountfs ${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/umountnfs.sh ${D}${sysconfdir}/init.d
+ if [ "${MACHINE}" == "palmpre" ]; then
+ install -m 0755 ${WORKDIR}/usb-gadget.sh ${D}${sysconfdir}/init.d
+ ln -sf ../init.d/usb-gadget.sh ${D}${sysconfdir}/rcS.d/S00usb-gadget.sh
+ fi
+
#
# Create runlevel links
#
diff --git a/recipes/shr/phoneuid_git.bb b/recipes/shr/phoneuid_git.bb
index 64c1176379..6bf4eb828a 100644
--- a/recipes/shr/phoneuid_git.bb
+++ b/recipes/shr/phoneuid_git.bb
@@ -4,7 +4,9 @@ LICENSE = "GPL"
SECTION = "x11/applications"
DEPENDS += " dbus-glib libframeworkd-glib libphone-ui sqlite3 shr-specs"
PV = "0.0.0+gitr${SRCREV}"
-PR = "r4"
+PR = "r5"
+
+RREPLACES_${PN} = "shr-today"
SRC_URI = "git://git.shr-project.org/repo/phoneuid.git;protocol=http;branch=master"
S = "${WORKDIR}/git"
diff --git a/recipes/shr/shr-launcher_svn.bb b/recipes/shr/shr-launcher_svn.bb
index 807d3a2cea..481be9021b 100644
--- a/recipes/shr/shr-launcher_svn.bb
+++ b/recipes/shr/shr-launcher_svn.bb
@@ -8,15 +8,9 @@ DEPENDS = "elementary eina edbus"
PV = "0.0.1+svnr${SRCPV}"
PR = "r4"
-SRC_URI = "svn://shr-launcher.googlecode.com/svn/trunk;module=.;proto=http"
+SRC_URI = "svn://shr-launcher.googlecode.com/svn;module=trunk;proto=http"
-do_configure_prepend() {
- # all links to /usr/share/automake-1.10/
- rm -f ${S}/depcomp ${S}/config.guess ${S}/config.sub ${S}/INSTALL ${S}/install-sh ${S}/missing
- touch ${S}/INSTALL
-}
-
-S = "${WORKDIR}"
+S = "${WORKDIR}/trunk"
inherit autotools
diff --git a/recipes/tasks/task-shr-feed.bb b/recipes/tasks/task-shr-feed.bb
index 1e5706d777..4b3e764d5e 100644
--- a/recipes/tasks/task-shr-feed.bb
+++ b/recipes/tasks/task-shr-feed.bb
@@ -27,6 +27,7 @@ RDEPENDS_${PN} += "\
gpe-timesheet \
gpe-contacts \
gtkmm \
+ guitartune \
mc \
mplayer \
navit \
diff --git a/recipes/tasks/task-shr-minimal.bb b/recipes/tasks/task-shr-minimal.bb
index caf2597ae0..c30539c607 100644
--- a/recipes/tasks/task-shr-minimal.bb
+++ b/recipes/tasks/task-shr-minimal.bb
@@ -1,5 +1,5 @@
DESCRIPTION = "SHR Lite Image Feed"
-PR = "r18"
+PR = "r19"
PV = "2.0"
LICENSE = "GPL"
@@ -136,7 +136,6 @@ RDEPENDS_${PN}-apps = "\
ffalarms \
shr-settings \
shr-theme \
- shr-today \
calc \
"
diff --git a/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch b/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch
deleted file mode 100644
index 8c2b85db13..0000000000
--- a/recipes/uclibc/uclibc-nptl/uclibc_rpc_thread.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Index: git/libc/inet/rpc/rpc_thread.c
-===================================================================
---- git.orig/libc/inet/rpc/rpc_thread.c 2009-12-04 13:13:09.000000000 -0800
-+++ git/libc/inet/rpc/rpc_thread.c 2009-12-04 13:13:17.000000000 -0800
-@@ -14,6 +14,7 @@
- #ifdef __UCLIBC_HAS_THREADS__
-
- #include <bits/libc-tsd.h>
-+#include <bits/libc-lock.h>
-
- /* Variable used in non-threaded applications or for the first thread. */
- static struct rpc_thread_variables __libc_tsd_RPC_VARS_mem;
diff --git a/recipes/uclibc/uclibc.inc b/recipes/uclibc/uclibc.inc
index 903aea27b9..2045007e72 100644
--- a/recipes/uclibc/uclibc.inc
+++ b/recipes/uclibc/uclibc.inc
@@ -181,3 +181,6 @@ do_install() {
chmod +x ${D}/${base_libdir}/*
}
+get_monotonic_srcrev () {
+ (cd ${S}; git rev-list HEAD | wc -l)
+}
diff --git a/recipes/uclibc/uclibc_nptl.bb b/recipes/uclibc/uclibc_nptl.bb
index a910590ab8..b1bb497d05 100644
--- a/recipes/uclibc/uclibc_nptl.bb
+++ b/recipes/uclibc/uclibc_nptl.bb
@@ -7,7 +7,7 @@
# on whether the base patches apply to the selected (SRCDATE) svn release.
#
UCLIBC_BASE ?= "0.9.30"
-SRCREV="b3d31460fbf188997c7337296a61409529f7c974"
+SRCREV="b3b9101a9c495f52c06cb2de27fcf45e6e5f0bf9"
PV = "${UCLIBC_BASE}+gitr${SRCREV}"
DEFAULT_PREFERENCE = "-1"
#DEFAULT_PREFERENCE is 0 (empty), releases have a preference of 1 so take
@@ -34,7 +34,6 @@ SRC_URI = "git://uclibc.org/uClibc.git;protocol=git;branch=nptl \
file://uclibc-arm-ftruncate64.patch;patch=1 \
file://ldso_use_arm_dl_linux_resolve_in_thumb_mode.patch;patch=1 \
file://uclibc_arm_remove_duplicate_sysdep_defs.patch;patch=1 \
- file://uclibc_rpc_thread.patch;patch=1 \
file://installfix.patch;patch=1 \
"