diff options
author | Felix Domke <tmbinc@elitedvb.net> | 2008-03-11 23:58:04 +0000 |
---|---|---|
committer | Felix Domke <tmbinc@elitedvb.net> | 2008-03-11 23:58:04 +0000 |
commit | 5351c367da11587c79517ec0c655cba3732c4e67 (patch) | |
tree | 34a7d24b6bedc94cab567f4c3a2408051b5fb63f /packages | |
parent | 0c5de51e1222e3e753159f20fc14bf0380fac135 (diff) | |
download | openembedded-5351c367da11587c79517ec0c655cba3732c4e67.tar.gz |
linux-dm800: update to stblinux-5.1
Diffstat (limited to 'packages')
8 files changed, 116 insertions, 8311 deletions
diff --git a/packages/linux/linux-dm800.bb b/packages/linux/linux-dm800.bb index 64e9d46aab..49b201f263 100644 --- a/packages/linux/linux-dm800.bb +++ b/packages/linux/linux-dm800.bb @@ -3,20 +3,28 @@ LICENSE = "GPL" PN = "linux-dm800" KV = "2.6.12" PV = "2.6.12" -PR = "r2" +PR = "r3" # note, the rX in the filename is *NOT* the packet revision - it's the patch revision. -SRC_URI += "http://sources.dreamboxupdate.com/download/kernel-patches/stblinux-2.6.12-5.0.tar.bz2 \ +SRC_URI += "ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-${KV}.tar.bz2 \ file://dm800_defconfig \ - file://linuxmips-2.6.12-dream-r6.patch;patch=1;pnum=1 \ - file://linux-2.6.12-update_dvbapi-r1.patch;patch=1;pnum=1 \ - file://linux-2.6.12-dvb-multipid-r4.patch;patch=1;pnum=1 \ + http://sources.dreamboxupdate.com/download/kernel-patches/linux-2.6.12-brcm-5.1.patch.bz2;patch=1;pnum=1 \ + http://sources.dreamboxupdate.com/download/kernel-patches/linux-2.6.12-update_dvbapi-r1.patch.bz2;patch=1;pnum=1 \ + http://sources.dreamboxupdate.com/download/kernel-patches/linux-2.6.12-dvb-multipid-r4.patch.bz2;patch=1;pnum=1 \ + http://sources.dreamboxupdate.com/download/kernel-patches/linux-2.6.12-dvb-core-fix-several-locking-problems.patch.bz2;patch=1;pnum=1 \ + file://linux-2.6.12-dream-misc.patch;patch=1;pnum=1 \ + file://linux-2.6.12-add-ioprio.patch;patch=1;pnum=1 \ + file://linux-2.6.12-fix-serial.patch;patch=1;pnum=1 \ + file://linux-2.6.12-dm800-flash-layout.patch;patch=1;pnum=1 \ file://linux-2.6.12-dream-temp.patch;patch=1;pnum=1 \ file://linux-2.6.12-brcm-mtd-blkdevfs-fix.diff;patch=1;pnum=1 \ - file://linux-2.6.12-dm800-flash-layout.patch;patch=1;pnum=1 \ file://linux-2.6.12-set-custom-extraversion.patch;patch=1;pnum=1 \ file://linux-2.6.12-7401C0-enable-llsc.patch;patch=1;pnum=1 \ - file://linux-2.6.12-fixup-prom-args.patch;patch=1;pnum=1" + file://linux-2.6.12-fixup-prom-args.patch;patch=1;pnum=1 \ + file://linuxmips-2.6.12-fix-fadvise.patch;patch=1;pnum=1 \ + 
file://linuxmips-2.6.12-fix-futex.patch;patch=1;pnum=1 \ + file://linuxmips-2.6.12-gcc4-compile-fix.patch;patch=1;pnum=1 \ + file://linuxmips-2.6.12-gdb-fix.patch;patch=1;pnum=1" S = "${WORKDIR}/stblinux-2.6.12" @@ -30,6 +38,7 @@ KERNEL_OUTPUT = "vmlinux" KERNEL_OBJECT_SUFFIX = "ko" do_munge() { + mv ${WORKDIR}/linux-2.6.12 ${WORKDIR}/stblinux-2.6.12 if [ -d ${S}/drivers/sound ]; then rm -R ${S}/drivers/sound; fi; diff --git a/packages/linux/linux-dm800/dm800_defconfig b/packages/linux/linux-dm800/dm800_defconfig index 7ec4f65047..43ca4f6b4f 100644 --- a/packages/linux/linux-dm800/dm800_defconfig +++ b/packages/linux/linux-dm800/dm800_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.12-5.0-brcmstb-dm800 -# Thu Feb 14 14:42:21 2008 +# Linux kernel version: 2.6.12-5.1-brcmstb-dm800 +# Thu Feb 28 19:09:49 2008 # CONFIG_MIPS=y @@ -79,9 +79,11 @@ CONFIG_KMOD=y # CONFIG_MIPS_BCM7328 is not set # CONFIG_MIPS_BCM7329 is not set # CONFIG_MIPS_BCM97456 is not set -# CONFIG_MIPS_BCM97456B0 is not set +# CONFIG_MIPS_BCM97456BX is not set +# CONFIG_MIPS_BCM97456BX_NAND is not set # CONFIG_MIPS_BCM7400AX is not set # CONFIG_MIPS_BCM7400BX is not set +# CONFIG_MIPS_BCM7400BX_NAND is not set # CONFIG_MIPS_BCM7400AX_NAND is not set # CONFIG_MIPS_BCM7405AX is not set # CONFIG_MIPS_BCM97455 is not set @@ -101,8 +103,10 @@ CONFIG_MIPS_BCM7401CX=y # CONFIG_MIPS_BCM7402CX is not set # CONFIG_MIPS_BCM7402CX_NAND is not set # CONFIG_MIPS_BCM7403AX is not set +# CONFIG_MIPS_BCM7451AX_SMB is not set # CONFIG_MIPS_BCM7403AX_NAND is not set # CONFIG_MIPS_BCM7452AX is not set +# CONFIG_MIPS_BCM7452AX_NAND is not set # CONFIG_MIPS_BCM7440AX is not set # CONFIG_MIPS_BCM7440BX is not set # CONFIG_MIPS_BCM7440BX_NAND is not set diff --git a/packages/linux/linux-dm800/linux-2.6.12-dm800-flash-layout.patch b/packages/linux/linux-dm800/linux-2.6.12-dm800-flash-layout.patch index fa7da19685..b6016ad4b8 100644 --- 
a/packages/linux/linux-dm800/linux-2.6.12-dm800-flash-layout.patch +++ b/packages/linux/linux-dm800/linux-2.6.12-dm800-flash-layout.patch @@ -1,20 +1,23 @@ -Index: stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c -=================================================================== ---- stblinux-2.6.12.orig/drivers/mtd/brcmnand/bcm7xxx-nand.c 2008-01-21 02:01:34.000000000 +0100 -+++ stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c 2008-01-21 02:01:52.000000000 +0100 -@@ -72,15 +72,27 @@ +diff -Naur stblinux-2.6.12-org/drivers/mtd/brcmnand/bcm7xxx-nand.c stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c +--- stblinux-2.6.12-org/drivers/mtd/brcmnand/bcm7xxx-nand.c 2008-02-28 18:40:17.000000000 +0100 ++++ stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c 2008-02-28 18:45:03.000000000 +0100 +@@ -78,19 +78,28 @@ static struct mtd_partition bcm7XXX_nand_parts[] = { - { name: "rootfs", offset: 0, size: DEFAULT_ROOTFS_SIZE }, -#ifdef CONFIG_MTD_ECM_PARTITION -- { name: "ecm", offset: DEFAULT_ROOTFS_SIZE, size: DEFAULT_ECM_SIZE }, +-#define AVAIL1_PART (1) +-#define OCAP_PART (2) +- { name: "avail1", offset: DEFAULT_ROOTFS_SIZE, size: DEFAULT_AVAIL1_SIZE }, +- { name: "ocap", offset: DEFAULT_ROOTFS_SIZE+DEFAULT_AVAIL1_SIZE, size: DEFAULT_OCAP_SIZE }, -#endif -- { name: "kernel", offset: 0x00800000, size: 4*1024*1024 }, -- { name: "cfe", offset: 0x00C00000, size: 2*1024*1024 }, -- { name: "nvm", offset: 0x00E00000, size: 1*1024*1024 }, +- { name: "kernel", offset: 0x00800000, size: 4<<20 }, +- { name: "cfe", offset: 0x00C00000, size: 2<<20 }, +- { name: "nvm", offset: 0x00E00000, size: 1<<20 }, - /* BBT 1MB not mountable by anyone */ - { name: "data", offset: 0x400000000, size: 0 }, +-}; + /* modified for Dreambox DM800 */ + { + .name = "complete", @@ -36,13 +39,15 @@ Index: stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c + .offset = 4*1024*1024, + .size = 60*1024*1024 + }, - }; ++ }; struct brcmnand_info { -@@ -98,52 +110,6 @@ + struct mtd_info mtd; +@@ 
-106,100 +115,6 @@ + } //EXPORT_SYMBOL(get_brcmnand_handle); - +- -/* - * Size and offset are variable, depending on the size of the chip, but - * cfe_kernel always starts at 1FC0_0000 and is 4MB size. @@ -54,6 +59,9 @@ Index: stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c - struct mtd_info* mtd = &nandinfo->mtd; - unsigned long size; - int i = 0; +- unsigned int ecm_size = DEFAULT_ECM_SIZE; +- unsigned int ocap_size = DEFAULT_OCAP_SIZE; +- unsigned int avail1_size = DEFAULT_AVAIL1_SIZE; - - if (mtd->size <= (512<<20)) { - size = mtd->size; // mtd->size may be different than nandinfo->size @@ -64,14 +72,40 @@ Index: stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c - size = 512 << 20; - *numParts = ARRAY_SIZE(bcm7XXX_nand_parts); - } +- +-#ifdef CONFIG_MTD_ECM_PARTITION +- /* Do not generate AVAIL1 partition if usable flash size is less than 64MB */ +- if (size < (64<<20)) { +- ecm_size = DEFAULT_OCAP_SIZE; +- bcm7XXX_nand_parts[AVAIL1_PART].size = avail1_size = 0; +- (*numParts)--; +- } +- else { +- int factor = size / (64 << 20); // Remember size is capped at 512MB +- +- bcm7XXX_nand_parts[OCAP_PART].size = ocap_size = factor*DEFAULT_OCAP_SIZE; +- bcm7XXX_nand_parts[AVAIL1_PART].size = avail1_size = factor*DEFAULT_AVAIL1_SIZE; +- ecm_size = ocap_size + avail1_size; +- } +- +-#endif - nandinfo->parts = bcm7XXX_nand_parts; -- bcm7XXX_nand_parts[0].size = size - DEFAULT_RESERVED_SIZE - DEFAULT_ECM_SIZE; +- bcm7XXX_nand_parts[0].size = size - DEFAULT_RESERVED_SIZE - ecm_size; - bcm7XXX_nand_parts[0].oobsel = &mtd->oobinfo; -printk("Part[%d] name=%s, size=%x, offset=%x\n", i, bcm7XXX_nand_parts[0].name, -bcm7XXX_nand_parts[0].size, bcm7XXX_nand_parts[0].offset); - - for (i=1; i< ARRAY_SIZE(bcm7XXX_nand_parts) - 1; i++) { -- bcm7XXX_nand_parts[i].offset += bcm7XXX_nand_parts[0].size - DEFAULT_ROOTFS_SIZE; +-#ifdef CONFIG_MTD_ECM_PARTITION +- //if (0 == bcm7XXX_nand_parts[i].size) +- // continue; +- /* Skip avail1 if size is less than 64 MB) */ +- if (0 == 
avail1_size && AVAIL1_PART == i) { +- bcm7XXX_nand_parts[i].offset = bcm7XXX_nand_parts[i-1].size + bcm7XXX_nand_parts[i-1].offset; +- continue; +- } +-#endif +- bcm7XXX_nand_parts[i].offset = bcm7XXX_nand_parts[i-1].size + bcm7XXX_nand_parts[i-1].offset; - // For now every partition uses the same oobinfo - bcm7XXX_nand_parts[i].oobsel = &mtd->oobinfo; -printk("Part[%d] name=%s, size=%x, offset=%x\n", i, bcm7XXX_nand_parts[i].name, @@ -84,42 +118,70 @@ Index: stblinux-2.6.12/drivers/mtd/brcmnand/bcm7xxx-nand.c - bcm7XXX_nand_parts[i].offset = 512 << 20; - bcm7XXX_nand_parts[i].size = mtd->size - (513 << 20); - bcm7XXX_nand_parts[i].oobsel = &mtd->oobinfo; +-#ifdef CONFIG_MTD_ECM_PARTITION +-printk("Part[%d] name=%s, size=%x, offset=%x\n", avail1_size? i: i-1, bcm7XXX_nand_parts[i].name, +-bcm7XXX_nand_parts[i].size, bcm7XXX_nand_parts[i].offset); +-#else -printk("Part[%d] name=%s, size=%x, offset=%x\n", i, bcm7XXX_nand_parts[i].name, -bcm7XXX_nand_parts[i].size, bcm7XXX_nand_parts[i].offset); +-#endif +- +- } +- +-#ifdef CONFIG_MTD_ECM_PARTITION +- /* Shift partitions 1 up if avail1_size is 0 */ +- if (0 == avail1_size) { +- for (i=AVAIL1_PART; i < *numParts; i++) { +- bcm7XXX_nand_parts[i].offset = bcm7XXX_nand_parts[i+1].offset; +- bcm7XXX_nand_parts[i].size = bcm7XXX_nand_parts[i+1].size; +- } +- bcm7XXX_nand_parts[*numParts].offset = 0; +- bcm7XXX_nand_parts[*numParts].size = 0; - } +-#endif -} - static int __devinit brcmnanddrv_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); -@@ -181,10 +147,9 @@ - } - - printk(" numchips=%d, size=%08x\n", info->brcmnand.numchips, info->mtd.size); +@@ -234,14 +149,12 @@ + if (brcmnand_scan(&info->mtd, MAX_NAND_CS)) { + err = -ENXIO; + goto out_free_info; +- } +- +- printk(" numchips=%d, size=%08x\n", info->brcmnand.numchips, info->mtd.size); - brcmnanddrv_setup_mtd_partitions(info, &numParts); - //printk(" add_mtd_partitions\n"); +-//printk(" add_mtd_partitions\n"); - 
add_mtd_partitions(&info->mtd, info->parts, numParts); -//printk(" dev_set_drvdata\n"); +- dev_set_drvdata(&pdev->dev, info); ++ } ++ ++ printk(" numchips=%d, size=%08x\n", info->brcmnand.numchips, info->mtd.size); + add_mtd_partitions(&info->mtd, bcm7XXX_nand_parts, 4); +//printk(" dev_set_drvdata\n"); - dev_set_drvdata(&pdev->dev, info); ++ dev_set_drvdata(&pdev->dev, info); //printk("<-- brcmnanddrv_probe\n"); return 0; -Index: stblinux-2.6.12/drivers/mtd/brcmnand/brcmnand_base.c -=================================================================== ---- stblinux-2.6.12.orig/drivers/mtd/brcmnand/brcmnand_base.c 2008-01-21 02:01:34.000000000 +0100 -+++ stblinux-2.6.12/drivers/mtd/brcmnand/brcmnand_base.c 2008-01-21 02:01:36.000000000 +0100 -@@ -200,6 +200,13 @@ + +diff -Naur stblinux-2.6.12-org/drivers/mtd/brcmnand/brcmnand_base.c stblinux-2.6.12/drivers/mtd/brcmnand/brcmnand_base.c +--- stblinux-2.6.12-org/drivers/mtd/brcmnand/brcmnand_base.c 2008-02-28 18:40:17.000000000 +0100 ++++ stblinux-2.6.12/drivers/mtd/brcmnand/brcmnand_base.c 2008-02-28 18:46:02.000000000 +0100 +@@ -296,7 +296,15 @@ .options = NAND_USE_FLASH_BBT, .timing1 = 0, .timing2 = 0, }, -+ { /* 8 */ +- ++ ++ { /* 20 */ + .chipId = HYNIX_HY27US08121A, + .mafId = FLASHTYPE_HYNIX, + .chipIdStr = "Hynix HY27US08121A (dream)", + .options = NAND_USE_FLASH_BBT, + .timing1 = 0, .timing2 = 0, + }, - #if 0 - { /* 9 */ - .chipId = SAMSUNG_K9K8G08UOA, ++ + { /* LAST DUMMY ENTRY */ + .chipId = 0, + .mafId = 0, diff --git a/packages/linux/linux-dm800/linux-2.6.12-dvb-multipid-r4.patch b/packages/linux/linux-dm800/linux-2.6.12-dvb-multipid-r4.patch deleted file mode 100644 index 006de12ca2..0000000000 --- a/packages/linux/linux-dm800/linux-2.6.12-dvb-multipid-r4.patch +++ /dev/null @@ -1,511 +0,0 @@ -diff -Naur linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dmxdev.c linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.c ---- linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dmxdev.c 2006-04-21 02:34:40.000000000 +0200 
-+++ linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.c 2006-04-21 02:25:54.000000000 +0200 -@@ -416,11 +416,18 @@ - switch (dmxdevfilter->type) { - case DMXDEV_TYPE_SEC: - del_timer(&dmxdevfilter->timer); -- dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec); -+ dmxdevfilter->feed_sec->stop_filtering(dmxdevfilter->feed_sec); - break; - case DMXDEV_TYPE_PES: -- dmxdevfilter->feed.ts->stop_filtering(dmxdevfilter->feed.ts); -+ { -+ struct dmxdev_feed *feed=dmxdevfilter->feeds; -+ /* stop all feeds */ -+ while (feed) { -+ feed->ts->stop_filtering(feed->ts); -+ feed=feed->next; -+ } - break; -+ } - default: - return -EINVAL; - } -@@ -436,11 +443,18 @@ - - switch (filter->type) { - case DMXDEV_TYPE_SEC: -- return filter->feed.sec->start_filtering(filter->feed.sec); -+ return filter->feed_sec->start_filtering(filter->feed_sec); - break; - case DMXDEV_TYPE_PES: -- return filter->feed.ts->start_filtering(filter->feed.ts); -+ { -+ struct dmxdev_feed *feed=filter->feeds; -+ /* start all feeds */ -+ while (feed) { -+ feed->ts->start_filtering(feed->ts); -+ feed=feed->next; -+ } - break; -+ } - default: - return -EINVAL; - } -@@ -466,7 +480,8 @@ - return 0; - } - -- filter->dev->demux->release_section_feed(dmxdev->demux, filter->feed.sec); -+ filter->dev->demux->release_section_feed(dmxdev->demux, -+ filter->feed_sec); - - return 0; - } -@@ -478,25 +493,32 @@ - - switch (dmxdevfilter->type) { - case DMXDEV_TYPE_SEC: -- if (!dmxdevfilter->feed.sec) -+ if (!dmxdevfilter->feed_sec) - break; - dvb_dmxdev_feed_stop(dmxdevfilter); - if (dmxdevfilter->filter.sec) -- dmxdevfilter->feed.sec-> -- release_filter(dmxdevfilter->feed.sec, -+ dmxdevfilter->feed_sec-> -+ release_filter(dmxdevfilter->feed_sec, - dmxdevfilter->filter.sec); - dvb_dmxdev_feed_restart(dmxdevfilter); -- dmxdevfilter->feed.sec=NULL; -+ dmxdevfilter->feed_sec=NULL; - break; - case DMXDEV_TYPE_PES: -- if (!dmxdevfilter->feed.ts) -- break; -+ { -+ struct dmxdev_feed *feed=dmxdevfilter->feeds; -+ - 
dvb_dmxdev_feed_stop(dmxdevfilter); -- dmxdevfilter->dev->demux-> -- release_ts_feed(dmxdevfilter->dev->demux, -- dmxdevfilter->feed.ts); -- dmxdevfilter->feed.ts=NULL; -+ -+ /* remove all feeds */ -+ while (feed) { -+ dmxdevfilter->dev->demux-> -+ release_ts_feed(dmxdevfilter->dev->demux, -+ feed->ts); -+ feed->ts=NULL; -+ feed=feed->next; -+ } - break; -+ } - default: - if (dmxdevfilter->state==DMXDEV_STATE_ALLOCATED) - return 0; -@@ -508,15 +530,88 @@ - - static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter) - { -+ struct dmxdev_feed *feed=dmxdevfilter->feeds; -+ - if (dmxdevfilter->state<DMXDEV_STATE_SET) - return 0; - -+ /* free all */ -+ while (feed) { -+ struct dmxdev_feed *n=feed; -+ if (feed->ts) -+ printk("!!! ts still allocated\n"); -+ feed=feed->next; -+ vfree(n); -+ } -+ -+ dmxdevfilter->feeds=NULL; - dmxdevfilter->type=DMXDEV_TYPE_NONE; - dmxdevfilter->pid=0xffff; - dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); - return 0; - } - -+static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev, struct dmxdev_filter *filter, -+ struct dmxdev_feed *feed) -+{ -+ struct dmx_pes_filter_params *para = &filter->params.pes; -+ struct timespec timeout = { 0 }; -+ struct dmx_ts_feed **tsfeed = &feed->ts; -+ int ret; -+ int ts_type; -+ enum dmx_ts_pes ts_pes; -+ dmx_output_t otype; -+ -+ *tsfeed = 0; -+ -+ /* determine which format to output */ -+ otype = para->output; -+ -+ ts_pes = (enum dmx_ts_pes)para->pes_type; -+ -+ if (otype == DMX_OUT_DECODER) -+ ts_type = TS_DECODER; -+ else -+ ts_type = 0; -+ -+ /* DMX_OUT_TS_TAP expects TS headers */ -+ if (otype == DMX_OUT_TS_TAP) -+ ts_type |= TS_PACKET; -+ -+ /* DMX_OUT_TAP used to expect PES packets. */ -+ if (otype == DMX_OUT_TAP) { -+ ts_type |= TS_PACKET; -+ -+ /*now, if somebody specified something else than DMX_PES_OTHER, we assume TS filtering. */ -+ if (ts_pes == DMX_PES_OTHER) /* i.e. DMX_PES_OTHER for backward compat. 
*/ -+ ts_type |= TS_PAYLOAD_ONLY; -+ } -+ -+ ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, tsfeed, -+ dvb_dmxdev_ts_callback); -+ -+ if (ret < 0) -+ return ret; -+ -+ (*tsfeed)->priv=filter; -+ -+ ret = (*tsfeed)->set(*tsfeed, feed->pid, ts_type, ts_pes, 32768, -+ timeout); -+ -+ if (ret < 0) { -+ dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); -+ return ret; -+ } -+ -+ ret = feed->ts->start_filtering(feed->ts); -+ if (ret < 0) { -+ dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); -+ return ret; -+ } -+ -+ return 0; -+} -+ - static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) - { - struct dmxdev *dmxdev = filter->dev; -@@ -545,7 +640,7 @@ - { - struct dmx_sct_filter_params *para=&filter->params.sec; - struct dmx_section_filter **secfilter=&filter->filter.sec; -- struct dmx_section_feed **secfeed=&filter->feed.sec; -+ struct dmx_section_feed **secfeed=&filter->feed_sec; - - *secfilter=NULL; - *secfeed=NULL; -@@ -555,7 +650,7 @@ - if (dmxdev->filter[i].state >= DMXDEV_STATE_GO && - dmxdev->filter[i].pid == para->pid && - dmxdev->filter[i].type == DMXDEV_TYPE_SEC) { -- *secfeed = dmxdev->filter[i].feed.sec; -+ *secfeed = dmxdev->filter[i].feed_sec; - break; - } - } -@@ -588,7 +683,7 @@ - - if (ret < 0) { - dvb_dmxdev_feed_restart(filter); -- filter->feed.sec->start_filtering(*secfeed); -+ filter->feed_sec->start_filtering(*secfeed); - dprintk ("could not get filter\n"); - return ret; - } -@@ -610,7 +705,7 @@ - - filter->todo = 0; - -- ret = filter->feed.sec->start_filtering (filter->feed.sec); -+ ret = filter->feed_sec->start_filtering (filter->feed_sec); - - if (ret < 0) - return ret; -@@ -621,57 +716,16 @@ - - case DMXDEV_TYPE_PES: - { -- struct timespec timeout = { 0 }; -- struct dmx_pes_filter_params *para = &filter->params.pes; -- dmx_output_t otype; -- int ret; -- int ts_type; -- enum dmx_ts_pes ts_pes; -- struct dmx_ts_feed **tsfeed = &filter->feed.ts; -- -- filter->feed.ts = NULL; -- otype=para->output; -- -- ts_pes=(enum 
dmx_ts_pes) para->pes_type; -- -- if (otype == DMX_OUT_DECODER) -- ts_type=TS_DECODER; -- else -- ts_type=0; -- -- if (otype == DMX_OUT_TS_TAP) -- ts_type |= TS_PACKET; -- -- if (otype == DMX_OUT_TAP) -- { -- ts_type |= TS_PACKET; -- if (ts_pes == DMX_PES_OTHER) -- ts_type |= TS_PAYLOAD_ONLY; -- } -- -- ret=dmxdev->demux->allocate_ts_feed(dmxdev->demux, -- tsfeed, -- dvb_dmxdev_ts_callback); -- if (ret<0) -- return ret; -- -- (*tsfeed)->priv = (void *) filter; -- -- ret = (*tsfeed)->set(*tsfeed, para->pid, ts_type, ts_pes, -- 32768, timeout); -- -- if (ret < 0) { -- dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); -- return ret; -- } -- -- ret = filter->feed.ts->start_filtering(filter->feed.ts); -- -- if (ret < 0) { -- dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); -- return ret; -+ struct dmxdev_feed *feed=filter->feeds; -+ -+ /* start all feeds */ -+ while (feed) { -+ if (dvb_dmxdev_start_feed(dmxdev, filter, feed)) { -+ printk(".. feed start failed. we should unroll now.\n"); -+ } -+ feed=feed->next; - } -- -+ - break; - } - default: -@@ -712,7 +766,7 @@ - dvb_dmxdev_buffer_init(&dmxdevfilter->buffer); - dmxdevfilter->type=DMXDEV_TYPE_NONE; - dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); -- dmxdevfilter->feed.ts=NULL; -+ dmxdevfilter->feeds=NULL; - init_timer(&dmxdevfilter->timer); - - up(&dmxdev->mutex); -@@ -757,6 +811,58 @@ - filter->mode[i]^=0xff; - } - -+static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev, -+ struct dmxdev_filter *dmxdevfilter, -+ __u16 pid) -+{ -+ struct dmxdev_feed *feed=vmalloc(sizeof(struct dmxdev_feed)); -+ -+ if (feed == NULL) -+ return -ENOMEM; -+ -+ memset(feed, 0, sizeof(struct dmxdev_feed)); -+ -+ feed->pid=pid; -+ -+ feed->next=dmxdevfilter->feeds; -+ dmxdevfilter->feeds=feed; -+ -+ if (dmxdevfilter->state >= DMXDEV_STATE_GO) { -+ int ret = dvb_dmxdev_start_feed(dmxdev, dmxdevfilter, feed); -+ if (ret) { -+ printk("add_pid: start feed failed..\n"); -+ return ret; -+ } -+ } -+ -+ return 0; -+} 
-+ -+static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev, -+ struct dmxdev_filter *dmxdevfilter, -+ __u16 pid) -+{ -+ struct dmxdev_feed **feed=&dmxdevfilter->feeds; -+ -+ while (*feed) { -+ if ((*feed)->pid == pid) { -+ struct dmxdev_feed *f=*feed; -+ if (f->ts) { // feed active -+ f->ts->stop_filtering(f->ts); -+ dmxdevfilter->dev->demux->release_ts_feed( -+ dmxdevfilter->dev->demux, -+ f->ts); -+ f->ts=NULL; -+ } -+ *feed=(*feed)->next; -+ vfree(f); -+ continue; -+ } -+ feed=&(*feed)->next; -+ } -+ -+ return 0; -+} - - static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, - struct dmxdev_filter *dmxdevfilter, -@@ -784,6 +890,7 @@ - struct dmx_pes_filter_params *params) - { - dvb_dmxdev_filter_stop(dmxdevfilter); -+ dvb_dmxdev_filter_reset(dmxdevfilter); - - if (params->pes_type>DMX_PES_OTHER || params->pes_type<0) - return -EINVAL; -@@ -794,6 +901,12 @@ - - dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); - -+ if (dmxdevfilter->params.pes.pid <= 0x2000) { -+ int result = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, dmxdevfilter->params.pes.pid); -+ if (result) -+ return result; -+ } -+ - if (params->flags&DMX_IMMEDIATE_START) - return dvb_dmxdev_filter_start(dmxdevfilter); - -@@ -845,17 +958,19 @@ - struct dmxdev_filter *dmxdevfilter= file->private_data; - int ret=0; - -- if (down_interruptible(&dmxdevfilter->mutex)) -- return -ERESTARTSYS; -- -- if (dmxdevfilter->type==DMXDEV_TYPE_SEC) -+ if (dmxdevfilter->type==DMXDEV_TYPE_SEC) { -+ if (down_interruptible(&dmxdevfilter->mutex)) -+ return -ERESTARTSYS; -+ - ret=dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos); -+ -+ up(&dmxdevfilter->mutex); -+ } - else - ret=dvb_dmxdev_buffer_read(&dmxdevfilter->buffer, - file->f_flags&O_NONBLOCK, - buf, count, ppos); - -- up(&dmxdevfilter->mutex); - return ret; - } - -@@ -940,6 +1055,23 @@ - } - ret = dmxdev->demux->get_caps(dmxdev->demux, parg); - break; -+ case DMX_ADD_PID: -+ if (down_interruptible(&dmxdevfilter->mutex)) { -+ up(&dmxdev->mutex); -+ 
return -ERESTARTSYS; -+ } -+ ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, arg); -+ up(&dmxdevfilter->mutex); -+ break; -+ -+ case DMX_REMOVE_PID: -+ if (down_interruptible(&dmxdevfilter->mutex)) { -+ up(&dmxdev->mutex); -+ return -ERESTARTSYS; -+ } -+ ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, arg); -+ up(&dmxdevfilter->mutex); -+ break; - - case DMX_SET_SOURCE: - if (!dmxdev->demux->set_source) { -diff -Naur linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dmxdev.h linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.h ---- linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dmxdev.h 2006-04-21 02:34:40.000000000 +0200 -+++ linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.h 2006-04-11 15:40:47.000000000 +0200 -@@ -69,15 +69,20 @@ - } filter; - - union { -- struct dmx_ts_feed *ts; -- struct dmx_section_feed *sec; -- } feed; -- -- union { - struct dmx_sct_filter_params sec; - struct dmx_pes_filter_params pes; - } params; -- -+ -+ struct dmx_section_feed *feed_sec; -+ -+ struct dmxdev_feed { -+ int pid; -+ -+ struct dmx_ts_feed *ts; -+ -+ struct dmxdev_feed *next; -+ } *feeds; -+ - int type; - enum dmxdev_state state; - struct dmxdev *dev; -diff -Naur linux-2.6.12.6-org/include/linux/dvb/dmx.h linux-2.6.12.6/include/linux/dvb/dmx.h ---- linux-2.6.12.6-org/include/linux/dvb/dmx.h 2006-04-21 02:34:40.000000000 +0200 -+++ linux-2.6.12.6/include/linux/dvb/dmx.h 2006-04-11 15:40:47.000000000 +0200 -@@ -176,5 +176,7 @@ - #define DMX_GET_CAPS _IOR('o', 48, dmx_caps_t) - #define DMX_SET_SOURCE _IOW('o', 49, dmx_source_t) - #define DMX_GET_STC _IOWR('o', 50, struct dmx_stc) -+#define DMX_ADD_PID _IO('o', 51) -+#define DMX_REMOVE_PID _IO('o', 52) - - #endif /*_DVBDMX_H_*/ -diff -Naur linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dvb_demux.c linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.c ---- linux-2.6.12.6-org/drivers/media/dvb/dvb-core/dvb_demux.c 2006-06-19 18:21:24.000000000 +0200 -+++ linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.c 2006-06-21 18:32:52.000000000 
+0200 -@@ -365,36 +365,18 @@ - } - } - --#define DVR_FEED(f) \ -- (((f)->type == DMX_TYPE_TS) && \ -- ((f)->feed.ts.is_filtering) && \ -- (((f)->ts_type & (TS_PACKET|TS_PAYLOAD_ONLY)) == TS_PACKET)) -- - static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) - { - struct dvb_demux_feed *feed; - struct list_head *pos, *head = &demux->feed_list; - u16 pid = ts_pid(buf); -- int dvr_done = 0; - - list_for_each(pos, head) { - feed = list_entry(pos, struct dvb_demux_feed, list_head); - -- if ((feed->pid != pid) && (feed->pid != 0x2000)) -- continue; -- -- /* copy each packet only once to the dvr device, even -- * if a PID is in multiple filters (e.g. video + PCR) */ -- if ((DVR_FEED(feed)) && (dvr_done++)) -- continue; -- -- if (feed->pid == pid) { -+ if (feed->pid == pid) - dvb_dmx_swfilter_packet_type(feed, buf); -- if (DVR_FEED(feed)) -- continue; -- } -- -- if (feed->pid == 0x2000) -+ else if (feed->pid == 0x2000) - feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK); - } - } diff --git a/packages/linux/linux-dm800/linux-2.6.12-fix_serial.patch b/packages/linux/linux-dm800/linux-2.6.12-fix_serial.patch deleted file mode 100644 index d3997de757..0000000000 --- a/packages/linux/linux-dm800/linux-2.6.12-fix_serial.patch +++ /dev/null @@ -1,21 +0,0 @@ -Index: stblinux-2.6.12/drivers/char/brcmserial.c -=================================================================== ---- stblinux-2.6.12.orig/drivers/char/brcmserial.c 2007-12-16 22:49:40.000000000 +0100 -+++ stblinux-2.6.12/drivers/char/brcmserial.c 2007-12-16 22:56:00.000000000 +0100 -@@ -1208,6 +1208,7 @@ - 600, 1200, 1800, 2400, 4800, 9600, 19200, - 38400, 57600, 115200, 230400, 460800, 0 }; - -+#if 0 - int tty_get_baud_rate(struct tty_struct *tty) - { - struct async_struct * info = (struct async_struct *)tty->driver_data; -@@ -1234,6 +1235,8 @@ - } - #endif - -+#endif -+ - /* - * This routine is called to set the UART divisor registers to match - * the specified baud rate for a serial port. 
diff --git a/packages/linux/linux-dm800/linux-2.6.12-set-custom-extraversion.patch b/packages/linux/linux-dm800/linux-2.6.12-set-custom-extraversion.patch index 84309596f5..188986bd27 100644 --- a/packages/linux/linux-dm800/linux-2.6.12-set-custom-extraversion.patch +++ b/packages/linux/linux-dm800/linux-2.6.12-set-custom-extraversion.patch @@ -6,8 +6,8 @@ Index: stblinux-2.6.12/Makefile PATCHLEVEL = 6 SUBLEVEL = 12 # STABLE_VERSION = .2 --EXTRAVERSION =-5.0-brcmstb -+EXTRAVERSION =-5.0-brcmstb-dm800 +-EXTRAVERSION =-5.1-brcmstb ++EXTRAVERSION =-5.1-brcmstb-dm800 NAME=Woozy Numbat # *DOCUMENTATION* diff --git a/packages/linux/linux-dm800/linux-2.6.12-update_dvbapi-r1.patch b/packages/linux/linux-dm800/linux-2.6.12-update_dvbapi-r1.patch deleted file mode 100644 index b498c9f161..0000000000 --- a/packages/linux/linux-dm800/linux-2.6.12-update_dvbapi-r1.patch +++ /dev/null @@ -1,2953 +0,0 @@ -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/compat.h linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/compat.h ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/compat.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/compat.h 2006-04-11 14:31:37.000000000 +0200 -@@ -0,0 +1,19 @@ -+#ifndef _COMPAT_H -+#define _COMPAT_H -+ -+#include <linux/i2c-id.h> -+#include <linux/version.h> -+#include <linux/utsname.h> -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) -+# define class_device_create(a, b, c, d, e, f, g, h) class_simple_device_add(a, c, d, e, f, g, h) -+# define class_device_destroy(a, b...) 
class_simple_device_remove(b) -+# define class_create class_simple_create -+# define class_destroy class_simple_destroy -+# define class class_simple -+# define try_to_freeze() do { if (current->flags & PF_FREEZE) refrigerator(PF_FREEZE); } while(0) -+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) -+# define class_device_create(a, b, c, d, e, f, g, h) class_device_create(a, c, d, e, f, g, h) -+#endif -+ -+#endif -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/demux.h linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/demux.h ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/demux.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/demux.h 2006-04-11 14:31:37.000000000 +0200 -@@ -30,6 +30,7 @@ - #include <linux/errno.h> - #include <linux/list.h> - #include <linux/time.h> -+#include <linux/dvb/dmx.h> - - /*--------------------------------------------------------------------------*/ - /* Common definitions */ -@@ -47,8 +48,11 @@ - * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed filter. 
- */ - -+#ifndef DMX_MAX_SECTION_SIZE -+#define DMX_MAX_SECTION_SIZE 4096 -+#endif - #ifndef DMX_MAX_SECFEED_SIZE --#define DMX_MAX_SECFEED_SIZE 4096 -+#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) - #endif - - -@@ -124,9 +128,7 @@ - u16 pid, - int type, - enum dmx_ts_pes pes_type, -- size_t callback_length, - size_t circular_buffer_size, -- int descramble, - struct timespec timeout); - int (*start_filtering) (struct dmx_ts_feed* feed); - int (*stop_filtering) (struct dmx_ts_feed* feed); -@@ -159,7 +161,6 @@ - int (*set) (struct dmx_section_feed* feed, - u16 pid, - size_t circular_buffer_size, -- int descramble, - int check_crc); - int (*allocate_filter) (struct dmx_section_feed* feed, - struct dmx_section_filter** filter); -@@ -207,7 +208,6 @@ - struct list_head connectivity_list; /* List of front-ends that can - be connected to a particular - demux */ -- void* priv; /* Pointer to private data of the API client */ - enum dmx_frontend_source source; - }; - -@@ -225,8 +225,6 @@ - #define DMX_MEMORY_BASED_FILTERING 8 /* write() available */ - #define DMX_CRC_CHECKING 16 - #define DMX_TS_DESCRAMBLING 32 --#define DMX_SECTION_PAYLOAD_DESCRAMBLING 64 --#define DMX_MAC_ADDRESS_DESCRAMBLING 128 - - /* - * Demux resource type identifier. 
-@@ -244,9 +242,7 @@ - struct dmx_demux { - u32 capabilities; /* Bitfield of capability flags */ - struct dmx_frontend* frontend; /* Front-end connected to the demux */ -- struct list_head reg_list; /* List of registered demuxes */ - void* priv; /* Pointer to private data of the API client */ -- int users; /* Number of users */ - int (*open) (struct dmx_demux* demux); - int (*close) (struct dmx_demux* demux); - int (*write) (struct dmx_demux* demux, const char* buf, size_t count); -@@ -260,17 +256,6 @@ - dmx_section_cb callback); - int (*release_section_feed) (struct dmx_demux* demux, - struct dmx_section_feed* feed); -- int (*descramble_mac_address) (struct dmx_demux* demux, -- u8* buffer1, -- size_t buffer1_length, -- u8* buffer2, -- size_t buffer2_length, -- u16 pid); -- int (*descramble_section_payload) (struct dmx_demux* demux, -- u8* buffer1, -- size_t buffer1_length, -- u8* buffer2, size_t buffer2_length, -- u16 pid); - int (*add_frontend) (struct dmx_demux* demux, - struct dmx_frontend* frontend); - int (*remove_frontend) (struct dmx_demux* demux, -@@ -282,20 +267,12 @@ - - int (*get_pes_pids) (struct dmx_demux* demux, u16 *pids); - -+ int (*get_caps) (struct dmx_demux* demux, struct dmx_caps *caps); -+ -+ int (*set_source) (struct dmx_demux* demux, const dmx_source_t *src); -+ - int (*get_stc) (struct dmx_demux* demux, unsigned int num, - u64 *stc, unsigned int *base); - }; - --/*--------------------------------------------------------------------------*/ --/* Demux directory */ --/*--------------------------------------------------------------------------*/ -- --/* -- * DMX_DIR_ENTRY(): Casts elements in the list of registered -- * demuxes from the generic type struct list_head* to the type struct dmx_demux -- *. 
-- */ -- --#define DMX_DIR_ENTRY(list) list_entry(list, struct dmx_demux, reg_list) -- - #endif /* #ifndef __DEMUX_H */ -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dmxdev.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dmxdev.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dmxdev.c 2006-04-11 14:31:37.000000000 +0200 -@@ -42,12 +42,6 @@ - - #define dprintk if (debug) printk - --static inline struct dmxdev_filter * --dvb_dmxdev_file_to_filter(struct file *file) --{ -- return (struct dmxdev_filter *) file->private_data; --} -- - static inline void dvb_dmxdev_buffer_init(struct dmxdev_buffer *buffer) - { - buffer->data=NULL; -@@ -577,7 +571,7 @@ - return ret; - } - -- ret=(*secfeed)->set(*secfeed, para->pid, 32768, 0, -+ ret=(*secfeed)->set(*secfeed, para->pid, 32768, - (para->flags & DMX_CHECK_CRC) ? 1 : 0); - - if (ret<0) { -@@ -640,7 +634,7 @@ - - ts_pes=(enum dmx_ts_pes) para->pes_type; - -- if (ts_pes<DMX_PES_OTHER) -+ if (otype == DMX_OUT_DECODER) - ts_type=TS_DECODER; - else - ts_type=0; -@@ -649,7 +643,11 @@ - ts_type |= TS_PACKET; - - if (otype == DMX_OUT_TAP) -- ts_type |= TS_PAYLOAD_ONLY|TS_PACKET; -+ { -+ ts_type |= TS_PACKET; -+ if (ts_pes == DMX_PES_OTHER) -+ ts_type |= TS_PAYLOAD_ONLY; -+ } - - ret=dmxdev->demux->allocate_ts_feed(dmxdev->demux, - tsfeed, -@@ -660,7 +658,7 @@ - (*tsfeed)->priv = (void *) filter; - - ret = (*tsfeed)->set(*tsfeed, para->pid, ts_type, ts_pes, -- 188, 32768, 0, timeout); -+ 32768, timeout); - - if (ret < 0) { - dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); -@@ -669,8 +667,10 @@ - - ret = filter->feed.ts->start_filtering(filter->feed.ts); - -- if (ret < 0) -+ if (ret < 0) { -+ dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); - return ret; -+ } - - break; - } -@@ -842,7 +842,7 @@ - static ssize_t - dvb_demux_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) - { -- struct 
dmxdev_filter *dmxdevfilter=dvb_dmxdev_file_to_filter(file); -+ struct dmxdev_filter *dmxdevfilter= file->private_data; - int ret=0; - - if (down_interruptible(&dmxdevfilter->mutex)) -@@ -863,7 +863,7 @@ - static int dvb_demux_do_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, void *parg) - { -- struct dmxdev_filter *dmxdevfilter=dvb_dmxdev_file_to_filter(file); -+ struct dmxdev_filter *dmxdevfilter = file->private_data; - struct dmxdev *dmxdev=dmxdevfilter->dev; - unsigned long arg=(unsigned long) parg; - int ret=0; -@@ -933,6 +933,22 @@ - dmxdev->demux->get_pes_pids(dmxdev->demux, (u16 *)parg); - break; - -+ case DMX_GET_CAPS: -+ if (!dmxdev->demux->get_caps) { -+ ret = -EINVAL; -+ break; -+ } -+ ret = dmxdev->demux->get_caps(dmxdev->demux, parg); -+ break; -+ -+ case DMX_SET_SOURCE: -+ if (!dmxdev->demux->set_source) { -+ ret = -EINVAL; -+ break; -+ } -+ ret = dmxdev->demux->set_source(dmxdev->demux, parg); -+ break; -+ - case DMX_GET_STC: - if (!dmxdev->demux->get_stc) { - ret=-EINVAL; -@@ -960,7 +976,7 @@ - - static unsigned int dvb_demux_poll (struct file *file, poll_table *wait) - { -- struct dmxdev_filter *dmxdevfilter = dvb_dmxdev_file_to_filter(file); -+ struct dmxdev_filter *dmxdevfilter = file->private_data; - unsigned int mask = 0; - - if (!dmxdevfilter) -@@ -985,7 +1001,7 @@ - - static int dvb_demux_release(struct inode *inode, struct file *file) - { -- struct dmxdev_filter *dmxdevfilter = dvb_dmxdev_file_to_filter(file); -+ struct dmxdev_filter *dmxdevfilter = file->private_data; - struct dmxdev *dmxdev = dmxdevfilter->dev; - - return dvb_dmxdev_filter_free(dmxdev, dmxdevfilter); -@@ -1109,7 +1125,6 @@ - dvb_dmxdev_filter_state_set(&dmxdev->filter[i], DMXDEV_STATE_FREE); - dmxdev->dvr[i].dev=dmxdev; - dmxdev->dvr[i].buffer.data=NULL; -- dvb_dmxdev_filter_state_set(&dmxdev->filter[i], DMXDEV_STATE_FREE); - dvb_dmxdev_dvr_state_set(&dmxdev->dvr[i], DMXDEV_STATE_FREE); - } - -diff -Naur 
linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_ca_en50221.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2006-04-11 14:31:37.000000000 +0200 -@@ -35,7 +35,8 @@ - #include <linux/moduleparam.h> - #include <linux/vmalloc.h> - #include <linux/delay.h> --#include <linux/rwsem.h> -+#include <linux/spinlock.h> -+#include <linux/sched.h> - - #include "dvb_ca_en50221.h" - #include "dvb_ringbuffer.h" -@@ -47,7 +48,7 @@ - - #define dprintk if (dvb_ca_en50221_debug) printk - --#define INIT_TIMEOUT_SECS 5 -+#define INIT_TIMEOUT_SECS 10 - - #define HOST_LINK_BUF_SIZE 0x200 - -@@ -110,9 +111,6 @@ - /* size of the buffer to use when talking to the CAM */ - int link_buf_size; - -- /* semaphore for syncing access to slot structure */ -- struct rw_semaphore sem; -- - /* buffer for incoming packets */ - struct dvb_ringbuffer rx_buffer; - -@@ -601,14 +599,11 @@ - if (ebuf == NULL) { - int buf_free; - -- down_read(&ca->slot_info[slot].sem); - if (ca->slot_info[slot].rx_buffer.data == NULL) { -- up_read(&ca->slot_info[slot].sem); - status = -EIO; - goto exit; - } - buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); -- up_read(&ca->slot_info[slot].sem); - - if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { - status = -EAGAIN; -@@ -679,14 +674,11 @@ - - /* OK, add it to the receive buffer, or copy into external buffer if supplied */ - if (ebuf == NULL) { -- down_read(&ca->slot_info[slot].sem); - if (ca->slot_info[slot].rx_buffer.data == NULL) { -- up_read(&ca->slot_info[slot].sem); - status = -EIO; - goto exit; - } - dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer, buf, bytes_read); -- up_read(&ca->slot_info[slot].sem); - } else { - memcpy(ebuf, buf, bytes_read); - } -@@ -801,12 +793,8 @@ - { - dprintk("%s\n", __FUNCTION__); - -- 
down_write(&ca->slot_info[slot].sem); - ca->pub->slot_shutdown(ca->pub, slot); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; -- vfree(ca->slot_info[slot].rx_buffer.data); -- ca->slot_info[slot].rx_buffer.data = NULL; -- up_write(&ca->slot_info[slot].sem); - - /* need to wake up all processes to check if they're now - trying to write to a defunct CAM */ -@@ -892,7 +880,7 @@ - - case DVB_CA_SLOTSTATE_RUNNING: - if (ca->open) -- dvb_ca_en50221_read_data(ca, slot, NULL, 0); -+ dvb_ca_en50221_thread_wakeup(ca); - break; - } - } -@@ -1126,16 +1114,16 @@ - break; - } - -- rxbuf = vmalloc(RX_BUFFER_SIZE); -- if (rxbuf == NULL) { -- printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num); -- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; -- dvb_ca_en50221_thread_update_delay(ca); -- break; -+ if (ca->slot_info[slot].rx_buffer.data == NULL) { -+ rxbuf = vmalloc(RX_BUFFER_SIZE); -+ if (rxbuf == NULL) { -+ printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num); -+ ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; -+ dvb_ca_en50221_thread_update_delay(ca); -+ break; -+ } -+ dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE); - } -- down_write(&ca->slot_info[slot].sem); -- dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE); -- up_write(&ca->slot_info[slot].sem); - - ca->pub->slot_ts_enable(ca->pub, slot); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING; -@@ -1147,11 +1135,7 @@ - if (!ca->open) - continue; - -- // no need to poll if the CAM supports IRQs -- if (ca->slot_info[slot].da_irq_supported) -- break; -- -- // poll mode -+ // poll slots for data - pktcount = 0; - while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0)) > 0) { - if (!ca->open) -@@ -1366,12 +1350,13 @@ - /** - * Condition for waking up in dvb_ca_en50221_io_read_condition - */ --static int dvb_ca_en50221_io_read_condition(struct 
dvb_ca_private *ca, int *result, int *_slot) -+static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, -+ int *result, int *_slot) - { - int slot; - int slot_count = 0; - int idx; -- int fraglen; -+ size_t fraglen; - int connection_id = -1; - int found = 0; - u8 hdr[2]; -@@ -1381,10 +1366,7 @@ - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) - goto nextslot; - -- down_read(&ca->slot_info[slot].sem); -- - if (ca->slot_info[slot].rx_buffer.data == NULL) { -- up_read(&ca->slot_info[slot].sem); - return 0; - } - -@@ -1402,10 +1384,7 @@ - idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen); - } - -- if (!found) -- up_read(&ca->slot_info[slot].sem); -- -- nextslot: -+nextslot: - slot = (slot + 1) % ca->slot_count; - slot_count++; - } -@@ -1510,8 +1489,7 @@ - goto exit; - status = pktlen; - -- exit: -- up_read(&ca->slot_info[slot].sem); -+exit: - return status; - } - -@@ -1543,11 +1521,11 @@ - for (i = 0; i < ca->slot_count; i++) { - - if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) { -- down_write(&ca->slot_info[i].sem); - if (ca->slot_info[i].rx_buffer.data != NULL) { -+ /* it is safe to call this here without locks because -+ * ca->open == 0. 
Data is not read in this case */ - dvb_ringbuffer_flush(&ca->slot_info[i].rx_buffer); - } -- up_write(&ca->slot_info[i].sem); - } - } - -@@ -1606,7 +1584,6 @@ - dprintk("%s\n", __FUNCTION__); - - if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { -- up_read(&ca->slot_info[slot].sem); - mask |= POLLIN; - } - -@@ -1618,7 +1595,6 @@ - poll_wait(file, &ca->wait_queue, wait); - - if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { -- up_read(&ca->slot_info[slot].sem); - mask |= POLLIN; - } - -@@ -1708,7 +1684,6 @@ - ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE; - atomic_set(&ca->slot_info[i].camchange_count, 0); - ca->slot_info[i].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; -- init_rwsem(&ca->slot_info[i].sem); - } - - if (signal_pending(current)) { -@@ -1728,7 +1703,7 @@ - ca->thread_pid = ret; - return 0; - -- error: -+error: - if (ca != NULL) { - if (ca->dvbdev != NULL) - dvb_unregister_device(ca->dvbdev); -@@ -1770,6 +1745,9 @@ - - for (i = 0; i < ca->slot_count; i++) { - dvb_ca_en50221_slot_shutdown(ca, i); -+ if (ca->slot_info[i].rx_buffer.data != NULL) { -+ vfree(ca->slot_info[i].rx_buffer.data); -+ } - } - kfree(ca->slot_info); - dvb_unregister_device(ca->dvbdev); -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_demux.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_demux.c 2006-04-11 14:31:37.000000000 +0200 -@@ -38,82 +38,52 @@ - */ - // #define DVB_DEMUX_SECTION_LOSS_LOG - -- --static LIST_HEAD(dmx_muxs); -- -- --static int dmx_register_demux(struct dmx_demux *demux) --{ -- demux->users = 0; -- list_add(&demux->reg_list, &dmx_muxs); -- return 0; --} -- --static int dmx_unregister_demux(struct dmx_demux* demux) --{ -- struct list_head *pos, *n, *head=&dmx_muxs; -- -- list_for_each_safe (pos, n, head) { -- if (DMX_DIR_ENTRY(pos) == demux) { -- if 
(demux->users>0) -- return -EINVAL; -- list_del(pos); -- return 0; -- } -- } -- -- return -ENODEV; --} -- -- - /****************************************************************************** - * static inlined helper functions - ******************************************************************************/ - -- - static inline u16 section_length(const u8 *buf) - { -- return 3+((buf[1]&0x0f)<<8)+buf[2]; -+ return 3 + ((buf[1] & 0x0f) << 8) + buf[2]; - } - -- - static inline u16 ts_pid(const u8 *buf) - { -- return ((buf[1]&0x1f)<<8)+buf[2]; -+ return ((buf[1] & 0x1f) << 8) + buf[2]; - } - -- - static inline u8 payload(const u8 *tsp) - { -- if (!(tsp[3] & 0x10)) // no payload? -+ if (!(tsp[3] & 0x10)) // no payload? - return 0; -- if (tsp[3] & 0x20) { // adaptation field? -- if (tsp[4] > 183) // corrupted data? -+ -+ if (tsp[3] & 0x20) { // adaptation field? -+ if (tsp[4] > 183) // corrupted data? - return 0; - else -- return 184-1-tsp[4]; -+ return 184 - 1 - tsp[4]; - } -+ - return 184; - } - -- --static u32 dvb_dmx_crc32 (struct dvb_demux_feed *f, const u8 *src, size_t len) -+static u32 dvb_dmx_crc32(struct dvb_demux_feed *f, const u8 *src, size_t len) - { -- return (f->feed.sec.crc_val = crc32_be (f->feed.sec.crc_val, src, len)); -+ return (f->feed.sec.crc_val = crc32_be(f->feed.sec.crc_val, src, len)); - } - -- --static void dvb_dmx_memcopy (struct dvb_demux_feed *f, u8 *d, const u8 *s, size_t len) -+static void dvb_dmx_memcopy(struct dvb_demux_feed *f, u8 *d, const u8 *s, -+ size_t len) - { -- memcpy (d, s, len); -+ memcpy(d, s, len); - } - -- - /****************************************************************************** - * Software filter functions - ******************************************************************************/ - --static inline int dvb_dmx_swfilter_payload (struct dvb_demux_feed *feed, const u8 *buf) -+static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, -+ const u8 *buf) - { - int count = payload(buf); - int p; -@@ 
-123,32 +93,31 @@ - if (count == 0) - return -1; - -- p = 188-count; -+ p = 188 - count; - - /* -- cc=buf[3]&0x0f; -- ccok=((dvbdmxfeed->cc+1)&0x0f)==cc ? 1 : 0; -- dvbdmxfeed->cc=cc; -+ cc = buf[3] & 0x0f; -+ ccok = ((feed->cc + 1) & 0x0f) == cc; -+ feed->cc = cc; - if (!ccok) - printk("missed packet!\n"); - */ - -- if (buf[1] & 0x40) // PUSI ? -+ if (buf[1] & 0x40) // PUSI ? - feed->peslen = 0xfffa; - - feed->peslen += count; - -- return feed->cb.ts (&buf[p], count, NULL, 0, &feed->feed.ts, DMX_OK); -+ return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, DMX_OK); - } - -- --static int dvb_dmx_swfilter_sectionfilter (struct dvb_demux_feed *feed, -- struct dvb_demux_filter *f) -+static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, -+ struct dvb_demux_filter *f) - { - u8 neq = 0; - int i; - -- for (i=0; i<DVB_DEMUX_MASK_MAX; i++) { -+ for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) { - u8 xor = f->filter.filter_value[i] ^ feed->feed.sec.secbuf[i]; - - if (f->maskandmode[i] & xor) -@@ -160,12 +129,11 @@ - if (f->doneq && !neq) - return 0; - -- return feed->cb.sec (feed->feed.sec.secbuf, feed->feed.sec.seclen, -- NULL, 0, &f->filter, DMX_OK); -+ return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, -+ NULL, 0, &f->filter, DMX_OK); - } - -- --static inline int dvb_dmx_swfilter_section_feed (struct dvb_demux_feed *feed) -+static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) - { - struct dvb_demux *demux = feed->demux; - struct dvb_demux_filter *f = feed->filter; -@@ -195,26 +163,24 @@ - return 0; - } - -- - static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed) - { - struct dmx_section_feed *sec = &feed->feed.sec; - - #ifdef DVB_DEMUX_SECTION_LOSS_LOG -- if(sec->secbufp < sec->tsfeedp) -- { -+ if (sec->secbufp < sec->tsfeedp) { - int i, n = sec->tsfeedp - sec->secbufp; - -- /* section padding is done with 0xff bytes entirely. 
-- ** due to speed reasons, we won't check all of them -- ** but just first and last -- */ -- if(sec->secbuf[0] != 0xff || sec->secbuf[n-1] != 0xff) -- { -+ /* -+ * Section padding is done with 0xff bytes entirely. -+ * Due to speed reasons, we won't check all of them -+ * but just first and last. -+ */ -+ if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { - printk("dvb_demux.c section ts padding loss: %d/%d\n", - n, sec->tsfeedp); - printk("dvb_demux.c pad data:"); -- for(i = 0; i < n; i++) -+ for (i = 0; i < n; i++) - printk(" %02x", sec->secbuf[i]); - printk("\n"); - } -@@ -226,82 +192,81 @@ - } - - /* --** Losless Section Demux 1.4.1 by Emard --** Valsecchi Patrick: --** - middle of section A (no PUSI) --** - end of section A and start of section B --** (with PUSI pointing to the start of the second section) --** --** In this case, without feed->pusi_seen you'll receive a garbage section --** consisting of the end of section A. Basically because tsfeedp --** is incemented and the use=0 condition is not raised --** when the second packet arrives. --** --** Fix: --** when demux is started, let feed->pusi_seen = 0 to --** prevent initial feeding of garbage from the end of --** previous section. When you for the first time see PUSI=1 --** then set feed->pusi_seen = 1 --*/ --static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, const u8 *buf, u8 len) -+ * Losless Section Demux 1.4.1 by Emard -+ * Valsecchi Patrick: -+ * - middle of section A (no PUSI) -+ * - end of section A and start of section B -+ * (with PUSI pointing to the start of the second section) -+ * -+ * In this case, without feed->pusi_seen you'll receive a garbage section -+ * consisting of the end of section A. Basically because tsfeedp -+ * is incemented and the use=0 condition is not raised -+ * when the second packet arrives. 
-+ * -+ * Fix: -+ * when demux is started, let feed->pusi_seen = 0 to -+ * prevent initial feeding of garbage from the end of -+ * previous section. When you for the first time see PUSI=1 -+ * then set feed->pusi_seen = 1 -+ */ -+static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, -+ const u8 *buf, u8 len) - { - struct dvb_demux *demux = feed->demux; - struct dmx_section_feed *sec = &feed->feed.sec; - u16 limit, seclen, n; - -- if(sec->tsfeedp >= DMX_MAX_SECFEED_SIZE) -+ if (sec->tsfeedp >= DMX_MAX_SECFEED_SIZE) - return 0; - -- if(sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) -- { -+ if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { - #ifdef DVB_DEMUX_SECTION_LOSS_LOG - printk("dvb_demux.c section buffer full loss: %d/%d\n", -- sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, DMX_MAX_SECFEED_SIZE); -+ sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, -+ DMX_MAX_SECFEED_SIZE); - #endif - len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; - } - -- if(len <= 0) -+ if (len <= 0) - return 0; - - demux->memcopy(feed, sec->secbuf_base + sec->tsfeedp, buf, len); - sec->tsfeedp += len; - -- /* ----------------------------------------------------- -- ** Dump all the sections we can find in the data (Emard) -- */ -- -+ /* -+ * Dump all the sections we can find in the data (Emard) -+ */ - limit = sec->tsfeedp; -- if(limit > DMX_MAX_SECFEED_SIZE) -- return -1; /* internal error should never happen */ -+ if (limit > DMX_MAX_SECFEED_SIZE) -+ return -1; /* internal error should never happen */ - - /* to be sure always set secbuf */ - sec->secbuf = sec->secbuf_base + sec->secbufp; - -- for(n = 0; sec->secbufp + 2 < limit; n++) -- { -+ for (n = 0; sec->secbufp + 2 < limit; n++) { - seclen = section_length(sec->secbuf); -- if(seclen <= 0 || seclen > DMX_MAX_SECFEED_SIZE -- || seclen + sec->secbufp > limit) -+ if (seclen <= 0 || seclen > DMX_MAX_SECTION_SIZE -+ || seclen + sec->secbufp > limit) - return 0; - sec->seclen = seclen; - sec->crc_val = ~0; - /* dump [secbuf .. 
secbuf+seclen) */ -- if(feed->pusi_seen) -+ if (feed->pusi_seen) - dvb_dmx_swfilter_section_feed(feed); - #ifdef DVB_DEMUX_SECTION_LOSS_LOG - else - printk("dvb_demux.c pusi not seen, discarding section data\n"); - #endif -- sec->secbufp += seclen; /* secbufp and secbuf moving together is */ -- sec->secbuf += seclen; /* redundand but saves pointer arithmetic */ -+ sec->secbufp += seclen; /* secbufp and secbuf moving together is */ -+ sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ - } - - return 0; - } - -- --static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, const u8 *buf) -+static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, -+ const u8 *buf) - { - u8 p, count; - int ccok, dc_i = 0; -@@ -309,10 +274,10 @@ - - count = payload(buf); - -- if (count == 0) /* count == 0 if no payload or out of range */ -+ if (count == 0) /* count == 0 if no payload or out of range */ - return -1; - -- p = 188 - count; /* payload start */ -+ p = 188 - count; /* payload start */ - - cc = buf[3] & 0x0f; - ccok = ((feed->cc + 1) & 0x0f) == cc; -@@ -326,52 +291,53 @@ - - if (!ccok || dc_i) { - #ifdef DVB_DEMUX_SECTION_LOSS_LOG -- printk("dvb_demux.c discontinuity detected %d bytes lost\n", count); -- /* those bytes under sume circumstances will again be reported -- ** in the following dvb_dmx_swfilter_section_new -- */ -+ printk("dvb_demux.c discontinuity detected %d bytes lost\n", -+ count); -+ /* -+ * those bytes under sume circumstances will again be reported -+ * in the following dvb_dmx_swfilter_section_new -+ */ - #endif -- /* Discontinuity detected. Reset pusi_seen = 0 to -- ** stop feeding of suspicious data until next PUSI=1 arrives -- */ -+ /* -+ * Discontinuity detected. 
Reset pusi_seen = 0 to -+ * stop feeding of suspicious data until next PUSI=1 arrives -+ */ - feed->pusi_seen = 0; - dvb_dmx_swfilter_section_new(feed); -- return 0; - } - - if (buf[1] & 0x40) { -- // PUSI=1 (is set), section boundary is here -+ /* PUSI=1 (is set), section boundary is here */ - if (count > 1 && buf[p] < count) { -- const u8 *before = buf+p+1; -+ const u8 *before = &buf[p + 1]; - u8 before_len = buf[p]; -- const u8 *after = before+before_len; -- u8 after_len = count-1-before_len; -+ const u8 *after = &before[before_len]; -+ u8 after_len = count - 1 - before_len; - -- dvb_dmx_swfilter_section_copy_dump(feed, before, before_len); -+ dvb_dmx_swfilter_section_copy_dump(feed, before, -+ before_len); - /* before start of new section, set pusi_seen = 1 */ - feed->pusi_seen = 1; - dvb_dmx_swfilter_section_new(feed); -- dvb_dmx_swfilter_section_copy_dump(feed, after, after_len); -+ dvb_dmx_swfilter_section_copy_dump(feed, after, -+ after_len); - } - #ifdef DVB_DEMUX_SECTION_LOSS_LOG -- else -- if (count > 0) -- printk("dvb_demux.c PUSI=1 but %d bytes lost\n", count); -+ else if (count > 0) -+ printk("dvb_demux.c PUSI=1 but %d bytes lost\n", count); - #endif - } else { -- // PUSI=0 (is not set), no section boundary -- const u8 *entire = buf+p; -- u8 entire_len = count; -- -- dvb_dmx_swfilter_section_copy_dump(feed, entire, entire_len); -+ /* PUSI=0 (is not set), no section boundary */ -+ dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); - } -+ - return 0; - } - -- --static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, const u8 *buf) -+static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, -+ const u8 *buf) - { -- switch(feed->type) { -+ switch (feed->type) { - case DMX_TYPE_TS: - if (!feed->feed.ts.is_filtering) - break; -@@ -379,7 +345,8 @@ - if (feed->ts_type & TS_PAYLOAD_ONLY) - dvb_dmx_swfilter_payload(feed, buf); - else -- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK); -+ feed->cb.ts(buf, 
188, NULL, 0, &feed->feed.ts, -+ DMX_OK); - } - if (feed->ts_type & TS_DECODER) - if (feed->demux->write_to_decoder) -@@ -390,7 +357,7 @@ - if (!feed->feed.sec.is_filtering) - break; - if (dvb_dmx_swfilter_section_packet(feed, buf) < 0) -- feed->feed.sec.seclen = feed->feed.sec.secbufp=0; -+ feed->feed.sec.seclen = feed->feed.sec.secbufp = 0; - break; - - default: -@@ -406,7 +373,7 @@ - static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) - { - struct dvb_demux_feed *feed; -- struct list_head *pos, *head=&demux->feed_list; -+ struct list_head *pos, *head = &demux->feed_list; - u16 pid = ts_pid(buf); - int dvr_done = 0; - -@@ -432,21 +399,21 @@ - } - } - --void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, size_t count) -+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, -+ size_t count) - { - spin_lock(&demux->lock); - - while (count--) { -- if(buf[0] == 0x47) { -- dvb_dmx_swfilter_packet(demux, buf); -- } -+ if (buf[0] == 0x47) -+ dvb_dmx_swfilter_packet(demux, buf); - buf += 188; - } - - spin_unlock(&demux->lock); - } --EXPORT_SYMBOL(dvb_dmx_swfilter_packets); - -+EXPORT_SYMBOL(dvb_dmx_swfilter_packets); - - void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) - { -@@ -454,8 +421,10 @@ - - spin_lock(&demux->lock); - -- if ((i = demux->tsbufp)) { -- if (count < (j=188-i)) { -+ if (demux->tsbufp) { -+ i = demux->tsbufp; -+ j = 188 - i; -+ if (count < j) { - memcpy(&demux->tsbuf[i], buf, count); - demux->tsbufp += count; - goto bailout; -@@ -469,13 +438,13 @@ - - while (p < count) { - if (buf[p] == 0x47) { -- if (count-p >= 188) { -- dvb_dmx_swfilter_packet(demux, buf+p); -+ if (count - p >= 188) { -+ dvb_dmx_swfilter_packet(demux, &buf[p]); - p += 188; - } else { -- i = count-p; -- memcpy(demux->tsbuf, buf+p, i); -- demux->tsbufp=i; -+ i = count - p; -+ memcpy(demux->tsbuf, &buf[p], i); -+ demux->tsbufp = i; - goto bailout; - } - } else -@@ -485,24 +454,29 @@ - bailout: - 
spin_unlock(&demux->lock); - } -+ - EXPORT_SYMBOL(dvb_dmx_swfilter); - - void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) - { -- int p = 0,i, j; -+ int p = 0, i, j; - u8 tmppack[188]; -+ - spin_lock(&demux->lock); - -- if ((i = demux->tsbufp)) { -- if (count < (j=204-i)) { -+ if (demux->tsbufp) { -+ i = demux->tsbufp; -+ j = 204 - i; -+ if (count < j) { - memcpy(&demux->tsbuf[i], buf, count); - demux->tsbufp += count; - goto bailout; - } - memcpy(&demux->tsbuf[i], buf, j); -- if ((demux->tsbuf[0] == 0x47)|(demux->tsbuf[0]==0xB8)) { -+ if ((demux->tsbuf[0] == 0x47) | (demux->tsbuf[0] == 0xB8)) { - memcpy(tmppack, demux->tsbuf, 188); -- if (tmppack[0] == 0xB8) tmppack[0] = 0x47; -+ if (tmppack[0] == 0xB8) -+ tmppack[0] = 0x47; - dvb_dmx_swfilter_packet(demux, tmppack); - } - demux->tsbufp = 0; -@@ -510,16 +484,17 @@ - } - - while (p < count) { -- if ((buf[p] == 0x47)|(buf[p] == 0xB8)) { -- if (count-p >= 204) { -- memcpy(tmppack, buf+p, 188); -- if (tmppack[0] == 0xB8) tmppack[0] = 0x47; -+ if ((buf[p] == 0x47) | (buf[p] == 0xB8)) { -+ if (count - p >= 204) { -+ memcpy(tmppack, &buf[p], 188); -+ if (tmppack[0] == 0xB8) -+ tmppack[0] = 0x47; - dvb_dmx_swfilter_packet(demux, tmppack); - p += 204; - } else { -- i = count-p; -- memcpy(demux->tsbuf, buf+p, i); -- demux->tsbufp=i; -+ i = count - p; -+ memcpy(demux->tsbuf, &buf[p], i); -+ demux->tsbufp = i; - goto bailout; - } - } else { -@@ -530,14 +505,14 @@ - bailout: - spin_unlock(&demux->lock); - } --EXPORT_SYMBOL(dvb_dmx_swfilter_204); - -+EXPORT_SYMBOL(dvb_dmx_swfilter_204); - --static struct dvb_demux_filter * dvb_dmx_filter_alloc(struct dvb_demux *demux) -+static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux) - { - int i; - -- for (i=0; i<demux->filternum; i++) -+ for (i = 0; i < demux->filternum; i++) - if (demux->filter[i].state == DMX_STATE_FREE) - break; - -@@ -549,11 +524,11 @@ - return &demux->filter[i]; - } - --static struct dvb_demux_feed * 
dvb_dmx_feed_alloc(struct dvb_demux *demux) -+static struct dvb_demux_feed *dvb_dmx_feed_alloc(struct dvb_demux *demux) - { - int i; - -- for (i=0; i<demux->feednum; i++) -+ for (i = 0; i < demux->feednum; i++) - if (demux->feed[i].state == DMX_STATE_FREE) - break; - -@@ -581,7 +556,7 @@ - spin_lock_irq(&feed->demux->lock); - if (dvb_demux_feed_find(feed)) { - printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n", -- __FUNCTION__, feed->type, feed->state, feed->pid); -+ __FUNCTION__, feed->type, feed->state, feed->pid); - goto out; - } - -@@ -595,7 +570,7 @@ - spin_lock_irq(&feed->demux->lock); - if (!(dvb_demux_feed_find(feed))) { - printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n", -- __FUNCTION__, feed->type, feed->state, feed->pid); -+ __FUNCTION__, feed->type, feed->state, feed->pid); - goto out; - } - -@@ -604,18 +579,17 @@ - spin_unlock_irq(&feed->demux->lock); - } - --static int dmx_ts_feed_set (struct dmx_ts_feed* ts_feed, u16 pid, int ts_type, -- enum dmx_ts_pes pes_type, size_t callback_length, -- size_t circular_buffer_size, int descramble, -- struct timespec timeout) -+static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type, -+ enum dmx_ts_pes pes_type, -+ size_t circular_buffer_size, struct timespec timeout) - { -- struct dvb_demux_feed *feed = (struct dvb_demux_feed *) ts_feed; -+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; - struct dvb_demux *demux = feed->demux; - - if (pid > DMX_MAX_PID) - return -EINVAL; - -- if (down_interruptible (&demux->mutex)) -+ if (down_interruptible(&demux->mutex)) - return -ERESTARTSYS; - - if (ts_type & TS_DECODER) { -@@ -638,20 +612,13 @@ - - feed->pid = pid; - feed->buffer_size = circular_buffer_size; -- feed->descramble = descramble; - feed->timeout = timeout; -- feed->cb_length = callback_length; - feed->ts_type = ts_type; - feed->pes_type = pes_type; - -- if (feed->descramble) { -- up(&demux->mutex); -- return -ENOSYS; -- } -- - if 
(feed->buffer_size) { - #ifdef NOBUFS -- feed->buffer=NULL; -+ feed->buffer = NULL; - #else - feed->buffer = vmalloc(feed->buffer_size); - if (!feed->buffer) { -@@ -667,14 +634,13 @@ - return 0; - } - -- --static int dmx_ts_feed_start_filtering(struct dmx_ts_feed* ts_feed) -+static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed) - { -- struct dvb_demux_feed *feed = (struct dvb_demux_feed *) ts_feed; -+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; - struct dvb_demux *demux = feed->demux; - int ret; - -- if (down_interruptible (&demux->mutex)) -+ if (down_interruptible(&demux->mutex)) - return -ERESTARTSYS; - - if (feed->state != DMX_STATE_READY || feed->type != DMX_TYPE_TS) { -@@ -701,13 +667,13 @@ - return 0; - } - --static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed* ts_feed) -+static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed) - { -- struct dvb_demux_feed *feed = (struct dvb_demux_feed *) ts_feed; -+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; - struct dvb_demux *demux = feed->demux; - int ret; - -- if (down_interruptible (&demux->mutex)) -+ if (down_interruptible(&demux->mutex)) - return -ERESTARTSYS; - - if (feed->state < DMX_STATE_GO) { -@@ -731,13 +697,14 @@ - return ret; - } - --static int dvbdmx_allocate_ts_feed (struct dmx_demux *dmx, struct dmx_ts_feed **ts_feed, -- dmx_ts_cb callback) -+static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx, -+ struct dmx_ts_feed **ts_feed, -+ dmx_ts_cb callback) - { -- struct dvb_demux *demux = (struct dvb_demux *) dmx; -+ struct dvb_demux *demux = (struct dvb_demux *)dmx; - struct dvb_demux_feed *feed; - -- if (down_interruptible (&demux->mutex)) -+ if (down_interruptible(&demux->mutex)) - return -ERESTARTSYS; - - if (!(feed = dvb_dmx_feed_alloc(demux))) { -@@ -760,7 +727,6 @@ - (*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering; - (*ts_feed)->set = dmx_ts_feed_set; - -- - if (!(feed->filter = dvb_dmx_filter_alloc(demux))) { - feed->state 
= DMX_STATE_FREE; - up(&demux->mutex); -@@ -776,22 +742,22 @@ - return 0; - } - --static int dvbdmx_release_ts_feed(struct dmx_demux *dmx, struct dmx_ts_feed *ts_feed) -+static int dvbdmx_release_ts_feed(struct dmx_demux *dmx, -+ struct dmx_ts_feed *ts_feed) - { -- struct dvb_demux *demux = (struct dvb_demux *) dmx; -- struct dvb_demux_feed *feed = (struct dvb_demux_feed *) ts_feed; -+ struct dvb_demux *demux = (struct dvb_demux *)dmx; -+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; - -- if (down_interruptible (&demux->mutex)) -+ if (down_interruptible(&demux->mutex)) - return -ERESTARTSYS; - - if (feed->state == DMX_STATE_FREE) { - up(&demux->mutex); - return -EINVAL; - } -- - #ifndef NOBUFS - vfree(feed->buffer); -- feed->buffer=0; -+ feed->buffer = NULL; - #endif - - feed->state = DMX_STATE_FREE; -@@ -808,19 +774,18 @@ - return 0; - } - -- - /****************************************************************************** - * dmx_section_feed API calls - ******************************************************************************/ - --static int dmx_section_feed_allocate_filter(struct dmx_section_feed* feed, -- struct dmx_section_filter** filter) -+static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed, -+ struct dmx_section_filter **filter) - { -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; - struct dvb_demux *dvbdemux = dvbdmxfeed->demux; - struct dvb_demux_filter *dvbdmxfilter; - -- if (down_interruptible (&dvbdemux->mutex)) -+ if (down_interruptible(&dvbdemux->mutex)) - return -ERESTARTSYS; - - dvbdmxfilter = dvb_dmx_filter_alloc(dvbdemux); -@@ -844,36 +809,29 @@ - return 0; - } - -- --static int dmx_section_feed_set(struct dmx_section_feed* feed, -- u16 pid, size_t circular_buffer_size, -- int descramble, int check_crc) -+static int dmx_section_feed_set(struct dmx_section_feed *feed, -+ u16 pid, size_t circular_buffer_size, -+ 
int check_crc) - { -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; - struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - - if (pid > 0x1fff) - return -EINVAL; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - - dvb_demux_feed_add(dvbdmxfeed); - - dvbdmxfeed->pid = pid; - dvbdmxfeed->buffer_size = circular_buffer_size; -- dvbdmxfeed->descramble = descramble; -- if (dvbdmxfeed->descramble) { -- up(&dvbdmx->mutex); -- return -ENOSYS; -- } -- - dvbdmxfeed->feed.sec.check_crc = check_crc; - - #ifdef NOBUFS - dvbdmxfeed->buffer = NULL; - #else -- dvbdmxfeed->buffer=vmalloc(dvbdmxfeed->buffer_size); -+ dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size); - if (!dvbdmxfeed->buffer) { - up(&dvbdmx->mutex); - return -ENOMEM; -@@ -885,7 +843,6 @@ - return 0; - } - -- - static void prepare_secfilters(struct dvb_demux_feed *dvbdmxfeed) - { - int i; -@@ -893,12 +850,12 @@ - struct dmx_section_filter *sf; - u8 mask, mode, doneq; - -- if (!(f=dvbdmxfeed->filter)) -+ if (!(f = dvbdmxfeed->filter)) - return; - do { - sf = &f->filter; - doneq = 0; -- for (i=0; i<DVB_DEMUX_MASK_MAX; i++) { -+ for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) { - mode = sf->filter_mode[i]; - mask = sf->filter_mask[i]; - f->maskandmode[i] = mask & mode; -@@ -908,14 +865,13 @@ - } while ((f = f->next)); - } - -- - static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed) - { -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; - struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - int ret; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - - if (feed->is_filtering) { -@@ -954,14 +910,13 @@ - return 0; - } - -- --static int dmx_section_feed_stop_filtering(struct dmx_section_feed* feed) -+static int 
dmx_section_feed_stop_filtering(struct dmx_section_feed *feed) - { -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; - struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - int ret; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - - if (!dvbdmx->stop_feed) { -@@ -980,15 +935,14 @@ - return ret; - } - -- - static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, -- struct dmx_section_filter* filter) -+ struct dmx_section_filter *filter) - { -- struct dvb_demux_filter *dvbdmxfilter = (struct dvb_demux_filter *) filter, *f; -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -+ struct dvb_demux_filter *dvbdmxfilter = (struct dvb_demux_filter *)filter, *f; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; - struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - - if (dvbdmxfilter->feed != dvbdmxfeed) { -@@ -1005,7 +959,7 @@ - if (f == dvbdmxfilter) { - dvbdmxfeed->filter = dvbdmxfilter->next; - } else { -- while(f->next != dvbdmxfilter) -+ while (f->next != dvbdmxfilter) - f = f->next; - f->next = f->next->next; - } -@@ -1020,10 +974,10 @@ - struct dmx_section_feed **feed, - dmx_section_cb callback) - { -- struct dvb_demux *dvbdmx = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; - struct dvb_demux_feed *dvbdmxfeed; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - - if (!(dvbdmxfeed = dvb_dmx_feed_alloc(dvbdmx))) { -@@ -1041,7 +995,7 @@ - dvbdmxfeed->filter = NULL; - dvbdmxfeed->buffer = NULL; - -- (*feed)=&dvbdmxfeed->feed.sec; -+ (*feed) = &dvbdmxfeed->feed.sec; - (*feed)->is_filtering = 0; - (*feed)->parent = demux; - (*feed)->priv = NULL; -@@ -1059,21 +1013,21 
@@ - static int dvbdmx_release_section_feed(struct dmx_demux *demux, - struct dmx_section_feed *feed) - { -- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) feed; -- struct dvb_demux *dvbdmx = (struct dvb_demux *) demux; -+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; -+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; - -- if (down_interruptible (&dvbdmx->mutex)) -+ if (down_interruptible(&dvbdmx->mutex)) - return -ERESTARTSYS; - -- if (dvbdmxfeed->state==DMX_STATE_FREE) { -+ if (dvbdmxfeed->state == DMX_STATE_FREE) { - up(&dvbdmx->mutex); - return -EINVAL; - } - #ifndef NOBUFS - vfree(dvbdmxfeed->buffer); -- dvbdmxfeed->buffer=0; -+ dvbdmxfeed->buffer = NULL; - #endif -- dvbdmxfeed->state=DMX_STATE_FREE; -+ dvbdmxfeed->state = DMX_STATE_FREE; - - dvb_demux_feed_del(dvbdmxfeed); - -@@ -1083,14 +1037,13 @@ - return 0; - } - -- - /****************************************************************************** - * dvb_demux kernel data API calls - ******************************************************************************/ - - static int dvbdmx_open(struct dmx_demux *demux) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - - if (dvbdemux->users >= MAX_DVB_DEMUX_USERS) - return -EUSERS; -@@ -1099,10 +1052,9 @@ - return 0; - } - -- - static int dvbdmx_close(struct dmx_demux *demux) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - - if (dvbdemux->users == 0) - return -ENODEV; -@@ -1112,15 +1064,14 @@ - return 0; - } - -- - static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count) - { -- struct dvb_demux *dvbdemux=(struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - - if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE)) - return -EINVAL; - -- if (down_interruptible (&dvbdemux->mutex)) -+ if 
(down_interruptible(&dvbdemux->mutex)) - return -ERESTARTSYS; - dvb_dmx_swfilter(dvbdemux, buf, count); - up(&dvbdemux->mutex); -@@ -1130,10 +1081,10 @@ - return count; - } - -- --static int dvbdmx_add_frontend(struct dmx_demux *demux, struct dmx_frontend *frontend) -+static int dvbdmx_add_frontend(struct dmx_demux *demux, -+ struct dmx_frontend *frontend) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - struct list_head *head = &dvbdemux->frontend_list; - - list_add(&(frontend->connectivity_list), head); -@@ -1141,13 +1092,13 @@ - return 0; - } - -- --static int dvbdmx_remove_frontend(struct dmx_demux *demux, struct dmx_frontend *frontend) -+static int dvbdmx_remove_frontend(struct dmx_demux *demux, -+ struct dmx_frontend *frontend) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - struct list_head *pos, *n, *head = &dvbdemux->frontend_list; - -- list_for_each_safe (pos, n, head) { -+ list_for_each_safe(pos, n, head) { - if (DMX_FE_ENTRY(pos) == frontend) { - list_del(pos); - return 0; -@@ -1157,25 +1108,25 @@ - return -ENODEV; - } - -- --static struct list_head * dvbdmx_get_frontends(struct dmx_demux *demux) -+static struct list_head *dvbdmx_get_frontends(struct dmx_demux *demux) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - - if (list_empty(&dvbdemux->frontend_list)) - return NULL; -+ - return &dvbdemux->frontend_list; - } - -- --static int dvbdmx_connect_frontend(struct dmx_demux *demux, struct dmx_frontend *frontend) -+static int dvbdmx_connect_frontend(struct dmx_demux *demux, -+ struct dmx_frontend *frontend) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - - if (demux->frontend) - return -EINVAL; - -- if (down_interruptible (&dvbdemux->mutex)) -+ if 
(down_interruptible(&dvbdemux->mutex)) - return -ERESTARTSYS; - - demux->frontend = frontend; -@@ -1183,12 +1134,11 @@ - return 0; - } - -- - static int dvbdmx_disconnect_frontend(struct dmx_demux *demux) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - -- if (down_interruptible (&dvbdemux->mutex)) -+ if (down_interruptible(&dvbdemux->mutex)) - return -ERESTARTSYS; - - demux->frontend = NULL; -@@ -1196,44 +1146,42 @@ - return 0; - } - -- --static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 *pids) -+static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 * pids) - { -- struct dvb_demux *dvbdemux = (struct dvb_demux *) demux; -+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - -- memcpy(pids, dvbdemux->pids, 5*sizeof(u16)); -+ memcpy(pids, dvbdemux->pids, 5 * sizeof(u16)); - return 0; - } - -- - int dvb_dmx_init(struct dvb_demux *dvbdemux) - { -- int i, err; -+ int i; - struct dmx_demux *dmx = &dvbdemux->dmx; - - dvbdemux->users = 0; -- dvbdemux->filter = vmalloc(dvbdemux->filternum*sizeof(struct dvb_demux_filter)); -+ dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter)); - - if (!dvbdemux->filter) - return -ENOMEM; - -- dvbdemux->feed = vmalloc(dvbdemux->feednum*sizeof(struct dvb_demux_feed)); -+ dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); - if (!dvbdemux->feed) { - vfree(dvbdemux->filter); - return -ENOMEM; - } -- for (i=0; i<dvbdemux->filternum; i++) { -+ for (i = 0; i < dvbdemux->filternum; i++) { - dvbdemux->filter[i].state = DMX_STATE_FREE; - dvbdemux->filter[i].index = i; - } -- for (i=0; i<dvbdemux->feednum; i++) { -+ for (i = 0; i < dvbdemux->feednum; i++) { - dvbdemux->feed[i].state = DMX_STATE_FREE; - dvbdemux->feed[i].index = i; - } -- dvbdemux->frontend_list.next= -- dvbdemux->frontend_list.prev= -- &dvbdemux->frontend_list; -- for (i=0; i<DMX_TS_PES_OTHER; i++) { -+ -+ 
INIT_LIST_HEAD(&dvbdemux->frontend_list); -+ -+ for (i = 0; i < DMX_TS_PES_OTHER; i++) { - dvbdemux->pesfilter[i] = NULL; - dvbdemux->pids[i] = 0xffff; - } -@@ -1247,12 +1195,11 @@ - if (!dvbdemux->check_crc32) - dvbdemux->check_crc32 = dvb_dmx_crc32; - -- if (!dvbdemux->memcopy) -- dvbdemux->memcopy = dvb_dmx_memcopy; -+ if (!dvbdemux->memcopy) -+ dvbdemux->memcopy = dvb_dmx_memcopy; - - dmx->frontend = NULL; -- dmx->reg_list.prev = dmx->reg_list.next = &dmx->reg_list; -- dmx->priv = (void *) dvbdemux; -+ dmx->priv = dvbdemux; - dmx->open = dvbdmx_open; - dmx->close = dvbdmx_close; - dmx->write = dvbdmx_write; -@@ -1261,9 +1208,6 @@ - dmx->allocate_section_feed = dvbdmx_allocate_section_feed; - dmx->release_section_feed = dvbdmx_release_section_feed; - -- dmx->descramble_mac_address = NULL; -- dmx->descramble_section_payload = NULL; -- - dmx->add_frontend = dvbdmx_add_frontend; - dmx->remove_frontend = dvbdmx_remove_frontend; - dmx->get_frontends = dvbdmx_get_frontends; -@@ -1274,21 +1218,15 @@ - sema_init(&dvbdemux->mutex, 1); - spin_lock_init(&dvbdemux->lock); - -- if ((err = dmx_register_demux(dmx)) < 0) -- return err; -- - return 0; - } --EXPORT_SYMBOL(dvb_dmx_init); - -+EXPORT_SYMBOL(dvb_dmx_init); - --int dvb_dmx_release(struct dvb_demux *dvbdemux) -+void dvb_dmx_release(struct dvb_demux *dvbdemux) - { -- struct dmx_demux *dmx = &dvbdemux->dmx; -- -- dmx_unregister_demux(dmx); - vfree(dvbdemux->filter); - vfree(dvbdemux->feed); -- return 0; - } -+ - EXPORT_SYMBOL(dvb_dmx_release); -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_demux.h ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_demux.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_demux.h 2006-04-11 14:31:37.000000000 +0200 -@@ -20,7 +20,6 @@ - * - */ - -- - #ifndef _DVB_DEMUX_H_ - #define _DVB_DEMUX_H_ - -@@ -44,103 +43,98 @@ - #define DVB_DEMUX_MASK_MAX 18 - - struct 
dvb_demux_filter { -- struct dmx_section_filter filter; -- u8 maskandmode [DMX_MAX_FILTER_SIZE]; -- u8 maskandnotmode [DMX_MAX_FILTER_SIZE]; -+ struct dmx_section_filter filter; -+ u8 maskandmode[DMX_MAX_FILTER_SIZE]; -+ u8 maskandnotmode[DMX_MAX_FILTER_SIZE]; - int doneq; - -- struct dvb_demux_filter *next; -- struct dvb_demux_feed *feed; -- int index; -- int state; -- int type; -- int pesto; -- -- u16 handle; -- u16 hw_handle; -- struct timer_list timer; -- int ts_state; --}; -+ struct dvb_demux_filter *next; -+ struct dvb_demux_feed *feed; -+ int index; -+ int state; -+ int type; - -+ u16 hw_handle; -+ struct timer_list timer; -+}; - - #define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head) - - struct dvb_demux_feed { -- union { -- struct dmx_ts_feed ts; -- struct dmx_section_feed sec; -+ union { -+ struct dmx_ts_feed ts; -+ struct dmx_section_feed sec; - } feed; - -- union { -- dmx_ts_cb ts; -- dmx_section_cb sec; -+ union { -+ dmx_ts_cb ts; -+ dmx_section_cb sec; - } cb; - -- struct dvb_demux *demux; -+ struct dvb_demux *demux; - void *priv; -- int type; -- int state; -- u16 pid; -- u8 *buffer; -- int buffer_size; -- int descramble; -- -- struct timespec timeout; -- struct dvb_demux_filter *filter; -- int cb_length; -+ int type; -+ int state; -+ u16 pid; -+ u8 *buffer; -+ int buffer_size; -+ -+ struct timespec timeout; -+ struct dvb_demux_filter *filter; - -- int ts_type; -- enum dmx_ts_pes pes_type; -+ int ts_type; -+ enum dmx_ts_pes pes_type; - -- int cc; -- int pusi_seen; /* prevents feeding of garbage from previous section */ -+ int cc; -+ int pusi_seen; /* prevents feeding of garbage from previous section */ - -- u16 peslen; -+ u16 peslen; - - struct list_head list_head; -- int index; /* a unique index for each feed (can be used as hardware pid filter index) */ -+ unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */ - }; - - struct dvb_demux { -- struct dmx_demux dmx; -- void *priv; -- int 
filternum; -- int feednum; -- int (*start_feed) (struct dvb_demux_feed *feed); -- int (*stop_feed) (struct dvb_demux_feed *feed); -- int (*write_to_decoder) (struct dvb_demux_feed *feed, -+ struct dmx_demux dmx; -+ void *priv; -+ int filternum; -+ int feednum; -+ int (*start_feed)(struct dvb_demux_feed *feed); -+ int (*stop_feed)(struct dvb_demux_feed *feed); -+ int (*write_to_decoder)(struct dvb_demux_feed *feed, - const u8 *buf, size_t len); -- u32 (*check_crc32) (struct dvb_demux_feed *feed, -+ u32 (*check_crc32)(struct dvb_demux_feed *feed, - const u8 *buf, size_t len); -- void (*memcopy) (struct dvb_demux_feed *feed, u8 *dst, -+ void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst, - const u8 *src, size_t len); - -- int users; -+ int users; - #define MAX_DVB_DEMUX_USERS 10 -- struct dvb_demux_filter *filter; -- struct dvb_demux_feed *feed; -+ struct dvb_demux_filter *filter; -+ struct dvb_demux_feed *feed; - -- struct list_head frontend_list; -+ struct list_head frontend_list; - -- struct dvb_demux_feed *pesfilter[DMX_TS_PES_OTHER]; -- u16 pids[DMX_TS_PES_OTHER]; -- int playing; -- int recording; -+ struct dvb_demux_feed *pesfilter[DMX_TS_PES_OTHER]; -+ u16 pids[DMX_TS_PES_OTHER]; -+ int playing; -+ int recording; - - #define DMX_MAX_PID 0x2000 - struct list_head feed_list; -- u8 tsbuf[204]; -- int tsbufp; -+ u8 tsbuf[204]; -+ int tsbufp; - - struct semaphore mutex; - spinlock_t lock; - }; - -- - int dvb_dmx_init(struct dvb_demux *dvbdemux); --int dvb_dmx_release(struct dvb_demux *dvbdemux); --void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf, size_t count); -+void dvb_dmx_release(struct dvb_demux *dvbdemux); -+void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf, -+ size_t count); - void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count); --void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count); -+void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, -+ size_t 
count); - - #endif /* _DVB_DEMUX_H_ */ -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvbdev.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvbdev.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvbdev.c 2006-04-11 14:31:37.000000000 +0200 -@@ -56,8 +56,7 @@ - #define nums2minor(num,type,id) ((num << 6) | (id << 4) | type) - #define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64) - --struct class_simple *dvb_class; --EXPORT_SYMBOL(dvb_class); -+static struct class *dvb_class; - - static struct dvb_device* dvbdev_find_device (int minor) - { -@@ -236,8 +235,8 @@ - S_IFCHR | S_IRUSR | S_IWUSR, - "dvb/adapter%d/%s%d", adap->num, dnames[type], id); - -- class_simple_device_add(dvb_class, MKDEV(DVB_MAJOR, nums2minor(adap->num, type, id)), -- NULL, "dvb%d.%s%d", adap->num, dnames[type], id); -+ class_device_create(dvb_class, NULL, MKDEV(DVB_MAJOR, nums2minor(adap->num, type, id)), -+ NULL, "dvb%d.%s%d", adap->num, dnames[type], id); - - dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n", - adap->num, dnames[type], id, nums2minor(adap->num, type, id), -@@ -256,7 +255,7 @@ - devfs_remove("dvb/adapter%d/%s%d", dvbdev->adapter->num, - dnames[dvbdev->type], dvbdev->id); - -- class_simple_device_remove(MKDEV(DVB_MAJOR, nums2minor(dvbdev->adapter->num, -+ class_device_destroy(dvb_class, MKDEV(DVB_MAJOR, nums2minor(dvbdev->adapter->num, - dvbdev->type, dvbdev->id))); - - list_del (&dvbdev->list_head); -@@ -412,7 +411,7 @@ - - devfs_mk_dir("dvb"); - -- dvb_class = class_simple_create(THIS_MODULE, "dvb"); -+ dvb_class = class_create(THIS_MODULE, "dvb"); - if (IS_ERR(dvb_class)) { - retval = PTR_ERR(dvb_class); - goto error; -@@ -429,7 +428,7 @@ - static void __exit exit_dvbdev(void) - { - devfs_remove("dvb"); -- class_simple_destroy(dvb_class); -+ class_destroy(dvb_class); - cdev_del(&dvb_device_cdev); - unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS); - } 
-diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvbdev.h linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvbdev.h ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvbdev.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvbdev.h 2006-04-11 14:31:37.000000000 +0200 -@@ -29,6 +29,7 @@ - #include <linux/list.h> - #include <linux/devfs_fs_kernel.h> - #include <linux/smp_lock.h> -+#include "compat.h" - - #define DVB_MAJOR 212 - -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_frontend.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_frontend.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_frontend.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_frontend.c 2006-04-11 14:31:37.000000000 +0200 -@@ -35,6 +35,7 @@ - #include <linux/moduleparam.h> - #include <linux/list.h> - #include <linux/suspend.h> -+#include <linux/jiffies.h> - #include <asm/processor.h> - #include <asm/semaphore.h> - -@@ -112,6 +113,7 @@ - int exit; - int wakeup; - fe_status_t status; -+ fe_sec_tone_mode_t tone; - }; - - -@@ -327,7 +329,8 @@ - return 1; - - if (fepriv->dvbdev->writers == 1) -- if (jiffies - fepriv->release_jiffies > dvb_shutdown_timeout * HZ) -+ if (time_after(jiffies, fepriv->release_jiffies + -+ dvb_shutdown_timeout * HZ)) - return 1; - - return 0; -@@ -389,8 +392,7 @@ - break; - } - -- if (current->flags & PF_FREEZE) -- refrigerator(PF_FREEZE); -+ try_to_freeze(); - - if (down_interruptible(&fepriv->sem)) - break; -@@ -433,9 +435,7 @@ - /* we're tuned, and the lock is still good... 
*/ - if (s & FE_HAS_LOCK) - continue; -- else { -- /* if we _WERE_ tuned, but now don't have a lock, -- * need to zigzag */ -+ else { /* if we _WERE_ tuned, but now don't have a lock */ - fepriv->state = FESTATE_ZIGZAG_FAST; - fepriv->started_auto_step = fepriv->auto_step; - check_wrapped = 0; -@@ -556,6 +556,49 @@ - fepriv->thread_pid); - } - -+s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime) -+{ -+ return ((curtime.tv_usec < lasttime.tv_usec) ? -+ 1000000 - lasttime.tv_usec + curtime.tv_usec : -+ curtime.tv_usec - lasttime.tv_usec); -+} -+EXPORT_SYMBOL(timeval_usec_diff); -+ -+static inline void timeval_usec_add(struct timeval *curtime, u32 add_usec) -+{ -+ curtime->tv_usec += add_usec; -+ if (curtime->tv_usec >= 1000000) { -+ curtime->tv_usec -= 1000000; -+ curtime->tv_sec++; -+ } -+} -+ -+/* -+ * Sleep until gettimeofday() > waketime + add_usec -+ * This needs to be as precise as possible, but as the delay is -+ * usually between 2ms and 32ms, it is done using a scheduled msleep -+ * followed by usleep (normally a busy-wait loop) for the remainder -+ */ -+void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec) -+{ -+ struct timeval lasttime; -+ s32 delta, newdelta; -+ -+ timeval_usec_add(waketime, add_usec); -+ -+ do_gettimeofday(&lasttime); -+ delta = timeval_usec_diff(lasttime, *waketime); -+ if (delta > 2500) { -+ msleep((delta - 1500) / 1000); -+ do_gettimeofday(&lasttime); -+ newdelta = timeval_usec_diff(lasttime, *waketime); -+ delta = (newdelta > delta) ? 
0 : newdelta; -+ } -+ if (delta > 0) -+ udelay(delta); -+} -+EXPORT_SYMBOL(dvb_frontend_sleep_until); -+ - static int dvb_frontend_start(struct dvb_frontend *fe) - { - int ret; -@@ -625,11 +668,21 @@ - break; - } - -- case FE_READ_STATUS: -+ case FE_READ_STATUS: { -+ fe_status_t* status = parg; -+ -+ /* if retune was requested but hasn't occured yet, prevent -+ * that user get signal state from previous tuning */ -+ if(fepriv->state == FESTATE_RETUNE) { -+ err=0; -+ *status = 0; -+ break; -+ } -+ - if (fe->ops->read_status) -- err = fe->ops->read_status(fe, (fe_status_t*) parg); -+ err = fe->ops->read_status(fe, status); - break; -- -+ } - case FE_READ_BER: - if (fe->ops->read_ber) - err = fe->ops->read_ber(fe, (__u32*) parg); -@@ -680,6 +733,7 @@ - err = fe->ops->set_tone(fe, (fe_sec_tone_mode_t) parg); - fepriv->state = FESTATE_DISEQC; - fepriv->status = 0; -+ fepriv->tone = (fe_sec_tone_mode_t) parg; - } - break; - -@@ -696,6 +750,60 @@ - err = fe->ops->dishnetwork_send_legacy_command(fe, (unsigned int) parg); - fepriv->state = FESTATE_DISEQC; - fepriv->status = 0; -+ } else if (fe->ops->set_voltage) { -+ /* -+ * NOTE: This is a fallback condition. Some frontends -+ * (stv0299 for instance) take longer than 8msec to -+ * respond to a set_voltage command. Those switches -+ * need custom routines to switch properly. For all -+ * other frontends, the following shoule work ok. -+ * Dish network legacy switches (as used by Dish500) -+ * are controlled by sending 9-bit command words -+ * spaced 8msec apart. -+ * the actual command word is switch/port dependant -+ * so it is up to the userspace application to send -+ * the right command. 
-+ * The command must always start with a '0' after -+ * initialization, so parg is 8 bits and does not -+ * include the initialization or start bit -+ */ -+ unsigned int cmd = ((unsigned int) parg) << 1; -+ struct timeval nexttime; -+ struct timeval tv[10]; -+ int i; -+ u8 last = 1; -+ if (dvb_frontend_debug) -+ printk("%s switch command: 0x%04x\n", __FUNCTION__, cmd); -+ do_gettimeofday(&nexttime); -+ if (dvb_frontend_debug) -+ memcpy(&tv[0], &nexttime, sizeof(struct timeval)); -+ /* before sending a command, initialize by sending -+ * a 32ms 18V to the switch -+ */ -+ fe->ops->set_voltage(fe, SEC_VOLTAGE_18); -+ dvb_frontend_sleep_until(&nexttime, 32000); -+ -+ for (i = 0; i < 9; i++) { -+ if (dvb_frontend_debug) -+ do_gettimeofday(&tv[i + 1]); -+ if ((cmd & 0x01) != last) { -+ /* set voltage to (last ? 13V : 18V) */ -+ fe->ops->set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); -+ last = (last) ? 0 : 1; -+ } -+ cmd = cmd >> 1; -+ if (i != 8) -+ dvb_frontend_sleep_until(&nexttime, 8000); -+ } -+ if (dvb_frontend_debug) { -+ printk("%s(%d): switch delay (should be 32k followed by all 8k\n", -+ __FUNCTION__, fe->dvb->num); -+ for (i = 1; i < 10; i++) -+ printk("%d: %d\n", i, timeval_usec_diff(tv[i-1] , tv[i])); -+ } -+ err = 0; -+ fepriv->state = FESTATE_DISEQC; -+ fepriv->status = 0; - } - break; - -@@ -882,6 +990,7 @@ - init_MUTEX (&fepriv->events.sem); - fe->dvb = dvb; - fepriv->inversion = INVERSION_OFF; -+ fepriv->tone = SEC_TONE_OFF; - - printk ("DVB: registering frontend %i (%s)...\n", - fe->dvb->num, -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_frontend.h linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_frontend.h ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_frontend.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_frontend.h 2006-04-11 14:31:37.000000000 +0200 -@@ -40,28 +40,6 @@ - - #include "dvbdev.h" - --/* FIXME: Move to i2c-id.h */ --#define I2C_DRIVERID_DVBFE_SP8870 
I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_CX22700 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_AT76C651 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_CX24110 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_CX22702 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_DIB3000MB I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_DST I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_DUMMY I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_L64781 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_MT312 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_MT352 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_NXT6000 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_SP887X I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_STV0299 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_TDA1004X I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_TDA8083 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_VES1820 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_VES1X93 I2C_DRIVERID_EXP2 --#define I2C_DRIVERID_DVBFE_TDA80XX I2C_DRIVERID_EXP2 -- -- - struct dvb_frontend_tune_settings { - int min_delay_ms; - int step_size; -@@ -123,4 +101,7 @@ - - extern int dvb_unregister_frontend(struct dvb_frontend* fe); - -+extern void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec); -+extern s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime); -+ - #endif -diff -Naur linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_net.c linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_net.c ---- linux-2.6.12.6/drivers/media/dvb/dvb-core/dvb_net.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/dvb-core/dvb_net.c 2006-04-11 14:31:37.000000000 +0200 -@@ -62,7 +62,6 @@ - #include <linux/uio.h> - #include <asm/uaccess.h> - #include <linux/crc32.h> --#include <linux/version.h> - - #include "dvb_demux.h" - #include "dvb_net.h" -@@ -171,11 +170,7 @@ - - skb->mac.raw=skb->data; - skb_pull(skb,dev->hard_header_len); --#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,8) -- eth = 
skb->mac.ethernet; --#else - eth = eth_hdr(skb); --#endif - - if (*eth->h_dest & 1) { - if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) -@@ -908,7 +903,7 @@ - return ret; - } - -- ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 0, 1); -+ ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 1); - - if (ret<0) { - printk("%s: could not set section feed\n", dev->name); -@@ -960,9 +955,7 @@ - priv->tsfeed->priv = (void *)dev; - ret = priv->tsfeed->set(priv->tsfeed, priv->pid, - TS_PACKET, DMX_TS_PES_OTHER, -- 188 * 100, /* nr. of bytes delivered per callback */ - 32768, /* circular buffer size */ -- 0, /* descramble */ - timeout); - - if (ret < 0) { -diff -Naur linux-2.6.12.6/drivers/media/dvb/frontends/stv0299.c linux-2.6.12.6-patched/drivers/media/dvb/frontends/stv0299.c ---- linux-2.6.12.6/drivers/media/dvb/frontends/stv0299.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/frontends/stv0299.c 2006-04-11 14:31:37.000000000 +0200 -@@ -48,6 +48,7 @@ - #include <linux/moduleparam.h> - #include <linux/string.h> - #include <linux/slab.h> -+#include <linux/jiffies.h> - #include <asm/div64.h> - - #include "dvb_frontend.h" -@@ -386,36 +387,6 @@ - }; - } - --static inline s32 stv0299_calc_usec_delay (struct timeval lasttime, struct timeval curtime) --{ -- return ((curtime.tv_usec < lasttime.tv_usec) ? 
-- 1000000 - lasttime.tv_usec + curtime.tv_usec : -- curtime.tv_usec - lasttime.tv_usec); --} -- --static void stv0299_sleep_until (struct timeval *waketime, u32 add_usec) --{ -- struct timeval lasttime; -- s32 delta, newdelta; -- -- waketime->tv_usec += add_usec; -- if (waketime->tv_usec >= 1000000) { -- waketime->tv_usec -= 1000000; -- waketime->tv_sec++; -- } -- -- do_gettimeofday (&lasttime); -- delta = stv0299_calc_usec_delay (lasttime, *waketime); -- if (delta > 2500) { -- msleep ((delta - 1500) / 1000); -- do_gettimeofday (&lasttime); -- newdelta = stv0299_calc_usec_delay (lasttime, *waketime); -- delta = (newdelta > delta) ? 0 : newdelta; -- } -- if (delta > 0) -- udelay (delta); --} -- - static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, u32 cmd) - { - struct stv0299_state* state = fe->demodulator_priv; -@@ -443,7 +414,7 @@ - memcpy (&tv[0], &nexttime, sizeof (struct timeval)); - stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */ - -- stv0299_sleep_until (&nexttime, 32000); -+ dvb_frontend_sleep_until(&nexttime, 32000); - - for (i=0; i<9; i++) { - if (debug_legacy_dish_switch) -@@ -457,13 +428,13 @@ - cmd = cmd >> 1; - - if (i != 8) -- stv0299_sleep_until (&nexttime, 8000); -+ dvb_frontend_sleep_until(&nexttime, 8000); - } - if (debug_legacy_dish_switch) { - printk ("%s(%d): switch delay (should be 32k followed by all 8k\n", - __FUNCTION__, fe->dvb->num); -- for (i=1; i < 10; i++) -- printk ("%d: %d\n", i, stv0299_calc_usec_delay (tv[i-1] , tv[i])); -+ for (i = 1; i < 10; i++) -+ printk ("%d: %d\n", i, timeval_usec_diff(tv[i-1] , tv[i])); - } - - return 0; -@@ -481,7 +452,7 @@ - - if (state->config->pll_init) { - stv0299_writeregI(state, 0x05, 0xb5); /* enable i2c repeater on stv0299 */ -- state->config->pll_init(fe); -+ state->config->pll_init(fe, state->i2c); - stv0299_writeregI(state, 0x05, 0x35); /* disable i2c repeater on stv0299 */ - } - -@@ -582,49 +553,14 @@ - if (state->config->invert) invval = (~invval) & 1; - 
stv0299_writeregI(state, 0x0c, (stv0299_readreg(state, 0x0c) & 0xfe) | invval); - -- if (state->config->enhanced_tuning) { -- /* check if we should do a finetune */ -- int frequency_delta = p->frequency - state->tuner_frequency; -- int minmax = p->u.qpsk.symbol_rate / 2000; -- if (minmax < 5000) minmax = 5000; -- -- if ((frequency_delta > -minmax) && (frequency_delta < minmax) && (frequency_delta != 0) && -- (state->fec_inner == p->u.qpsk.fec_inner) && -- (state->symbol_rate == p->u.qpsk.symbol_rate)) { -- int Drot_freq = (frequency_delta << 16) / (state->config->mclk / 1000); -- -- // zap the derotator registers first -- stv0299_writeregI(state, 0x22, 0x00); -- stv0299_writeregI(state, 0x23, 0x00); -- -- // now set them as we want -- stv0299_writeregI(state, 0x22, Drot_freq >> 8); -- stv0299_writeregI(state, 0x23, Drot_freq); -- } else { -- /* A "normal" tune is requested */ -- stv0299_writeregI(state, 0x05, 0xb5); /* enable i2c repeater on stv0299 */ -- state->config->pll_set(fe, p); -- stv0299_writeregI(state, 0x05, 0x35); /* disable i2c repeater on stv0299 */ -- -- stv0299_writeregI(state, 0x32, 0x80); -- stv0299_writeregI(state, 0x22, 0x00); -- stv0299_writeregI(state, 0x23, 0x00); -- stv0299_writeregI(state, 0x32, 0x19); -- stv0299_set_symbolrate (fe, p->u.qpsk.symbol_rate); -- stv0299_set_FEC (state, p->u.qpsk.fec_inner); -- } -- } else { -- stv0299_writeregI(state, 0x05, 0xb5); /* enable i2c repeater on stv0299 */ -- state->config->pll_set(fe, p); -- stv0299_writeregI(state, 0x05, 0x35); /* disable i2c repeater on stv0299 */ -- -- stv0299_set_FEC (state, p->u.qpsk.fec_inner); -- stv0299_set_symbolrate (fe, p->u.qpsk.symbol_rate); -- stv0299_writeregI(state, 0x22, 0x00); -- stv0299_writeregI(state, 0x23, 0x00); -- stv0299_readreg (state, 0x23); -- stv0299_writeregI(state, 0x12, 0xb9); -- } -+ stv0299_writeregI(state, 0x05, 0xb5); /* enable i2c repeater on stv0299 */ -+ state->config->pll_set(fe, state->i2c, p); -+ stv0299_writeregI(state, 0x05, 0x35); /* 
disable i2c repeater on stv0299 */ -+ -+ stv0299_set_FEC (state, p->u.qpsk.fec_inner); -+ stv0299_set_symbolrate (fe, p->u.qpsk.symbol_rate); -+ stv0299_writeregI(state, 0x22, 0x00); -+ stv0299_writeregI(state, 0x23, 0x00); - - state->tuner_frequency = p->frequency; - state->fec_inner = p->u.qpsk.fec_inner; -diff -Naur linux-2.6.12.6/drivers/media/dvb/frontends/stv0299.h linux-2.6.12.6-patched/drivers/media/dvb/frontends/stv0299.h ---- linux-2.6.12.6/drivers/media/dvb/frontends/stv0299.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/frontends/stv0299.h 2006-04-11 14:31:37.000000000 +0200 -@@ -73,9 +73,6 @@ - /* does the inversion require inversion? */ - u8 invert:1; - -- /* Should the enhanced tuning code be used? */ -- u8 enhanced_tuning:1; -- - /* Skip reinitialisation? */ - u8 skip_reinit:1; - -@@ -92,8 +89,8 @@ - int (*set_symbol_rate)(struct dvb_frontend* fe, u32 srate, u32 ratio); - - /* PLL maintenance */ -- int (*pll_init)(struct dvb_frontend* fe); -- int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params); -+ int (*pll_init)(struct dvb_frontend *fe, struct i2c_adapter *i2c); -+ int (*pll_set)(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters *params); - }; - - extern int stv0299_writereg (struct dvb_frontend* fe, u8 reg, u8 data); -diff -Naur linux-2.6.12.6/drivers/media/dvb/frontends/tda1004x.c linux-2.6.12.6-patched/drivers/media/dvb/frontends/tda1004x.c ---- linux-2.6.12.6/drivers/media/dvb/frontends/tda1004x.c 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/frontends/tda1004x.c 2006-04-11 14:31:37.000000000 +0200 -@@ -32,6 +32,10 @@ - #include <linux/module.h> - #include <linux/moduleparam.h> - #include <linux/device.h> -+#include <linux/jiffies.h> -+#include <linux/string.h> -+#include <linux/slab.h> -+ - #include "dvb_frontend.h" - #include "tda1004x.h" - -@@ -49,10 +53,8 @@ - /* private demod data */ - u8 initialised; - enum 
tda1004x_demod demod_type; -- u8 fw_version; - }; - -- - static int debug; - #define dprintk(args...) \ - do { \ -@@ -122,6 +124,8 @@ - #define TDA10046H_GPIO_OUT_SEL 0x41 - #define TDA10046H_GPIO_SELECT 0x42 - #define TDA10046H_AGC_CONF 0x43 -+#define TDA10046H_AGC_THR 0x44 -+#define TDA10046H_AGC_RENORM 0x45 - #define TDA10046H_AGC_GAINS 0x46 - #define TDA10046H_AGC_TUN_MIN 0x47 - #define TDA10046H_AGC_TUN_MAX 0x48 -@@ -267,21 +271,58 @@ - static int tda10046h_set_bandwidth(struct tda1004x_state *state, - fe_bandwidth_t bandwidth) - { -- static u8 bandwidth_6mhz[] = { 0x80, 0x15, 0xfe, 0xab, 0x8e }; -- static u8 bandwidth_7mhz[] = { 0x6e, 0x02, 0x53, 0xc8, 0x25 }; -- static u8 bandwidth_8mhz[] = { 0x60, 0x12, 0xa8, 0xe4, 0xbd }; -- -+ static u8 bandwidth_6mhz_53M[] = { 0x7b, 0x2e, 0x11, 0xf0, 0xd2 }; -+ static u8 bandwidth_7mhz_53M[] = { 0x6a, 0x02, 0x6a, 0x43, 0x9f }; -+ static u8 bandwidth_8mhz_53M[] = { 0x5c, 0x32, 0xc2, 0x96, 0x6d }; -+ -+ static u8 bandwidth_6mhz_48M[] = { 0x70, 0x02, 0x49, 0x24, 0x92 }; -+ static u8 bandwidth_7mhz_48M[] = { 0x60, 0x02, 0xaa, 0xaa, 0xab }; -+ static u8 bandwidth_8mhz_48M[] = { 0x54, 0x03, 0x0c, 0x30, 0xc3 }; -+ int tda10046_clk53m; -+ -+ if ((state->config->if_freq == TDA10046_FREQ_045) || -+ (state->config->if_freq == TDA10046_FREQ_052)) -+ tda10046_clk53m = 0; -+ else -+ tda10046_clk53m = 1; - switch (bandwidth) { - case BANDWIDTH_6_MHZ: -- tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz, sizeof(bandwidth_6mhz)); -+ if (tda10046_clk53m) -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_53M, -+ sizeof(bandwidth_6mhz_53M)); -+ else -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_48M, -+ sizeof(bandwidth_6mhz_48M)); -+ if (state->config->if_freq == TDA10046_FREQ_045) { -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0a); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xab); -+ } - break; - - case BANDWIDTH_7_MHZ: -- tda1004x_write_buf(state, 
TDA10046H_TIME_WREF1, bandwidth_7mhz, sizeof(bandwidth_7mhz)); -+ if (tda10046_clk53m) -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_53M, -+ sizeof(bandwidth_7mhz_53M)); -+ else -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_48M, -+ sizeof(bandwidth_7mhz_48M)); -+ if (state->config->if_freq == TDA10046_FREQ_045) { -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00); -+ } - break; - - case BANDWIDTH_8_MHZ: -- tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz, sizeof(bandwidth_8mhz)); -+ if (tda10046_clk53m) -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_53M, -+ sizeof(bandwidth_8mhz_53M)); -+ else -+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_48M, -+ sizeof(bandwidth_8mhz_48M)); -+ if (state->config->if_freq == TDA10046_FREQ_045) { -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x55); -+ } - break; - - default: -@@ -315,20 +356,35 @@ - memcpy(buf + 1, mem + pos, tx_size); - fw_msg.len = tx_size + 1; - if (i2c_transfer(state->i2c, &fw_msg, 1) != 1) { -- printk("tda1004x: Error during firmware upload\n"); -+ printk(KERN_ERR "tda1004x: Error during firmware upload\n"); - return -EIO; - } - pos += tx_size; - - dprintk("%s: fw_pos=0x%x\n", __FUNCTION__, pos); - } -+ // give the DSP a chance to settle 03/10/05 Hac -+ msleep(100); - - return 0; - } - --static int tda1004x_check_upload_ok(struct tda1004x_state *state, u8 dspVersion) -+static int tda1004x_check_upload_ok(struct tda1004x_state *state) - { - u8 data1, data2; -+ unsigned long timeout; -+ -+ if (state->demod_type == TDA1004X_DEMOD_TDA10046) { -+ timeout = jiffies + 2 * HZ; -+ while(!(tda1004x_read_byte(state, TDA1004X_STATUS_CD) & 0x20)) { -+ if (time_after(jiffies, timeout)) { -+ printk(KERN_ERR "tda1004x: timeout waiting for DSP ready\n"); -+ break; -+ } -+ msleep(1); -+ } -+ } 
else -+ msleep(100); - - // check upload was OK - tda1004x_write_mask(state, TDA1004X_CONFC4, 0x10, 0); // we want to read from the DSP -@@ -336,9 +392,11 @@ - - data1 = tda1004x_read_byte(state, TDA1004X_DSP_DATA1); - data2 = tda1004x_read_byte(state, TDA1004X_DSP_DATA2); -- if ((data1 != 0x67) || (data2 != dspVersion)) -+ if (data1 != 0x67 || data2 < 0x20 || data2 > 0x2e) { -+ printk(KERN_INFO "tda1004x: found firmware revision %x -- invalid\n", data2); - return -EIO; -- -+ } -+ printk(KERN_INFO "tda1004x: found firmware revision %x -- ok\n", data2); - return 0; - } - -@@ -349,14 +407,14 @@ - const struct firmware *fw; - - /* don't re-upload unless necessary */ -- if (tda1004x_check_upload_ok(state, 0x2c) == 0) -+ if (tda1004x_check_upload_ok(state) == 0) - return 0; - - /* request the firmware, this will block until someone uploads it */ -- printk("tda1004x: waiting for firmware upload (%s)...\n", TDA10045_DEFAULT_FIRMWARE); -+ printk(KERN_INFO "tda1004x: waiting for firmware upload (%s)...\n", TDA10045_DEFAULT_FIRMWARE); - ret = state->config->request_firmware(fe, &fw, TDA10045_DEFAULT_FIRMWARE); - if (ret) { -- printk("tda1004x: no firmware upload (timeout or file not found?)\n"); -+ printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n"); - return ret; - } - -@@ -370,95 +428,111 @@ - tda10045h_set_bandwidth(state, BANDWIDTH_8_MHZ); - - ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10045H_FWPAGE, TDA10045H_CODE_IN); -+ release_firmware(fw); - if (ret) - return ret; -- printk("tda1004x: firmware upload complete\n"); -+ printk(KERN_INFO "tda1004x: firmware upload complete\n"); - - /* wait for DSP to initialise */ - /* DSPREADY doesn't seem to work on the TDA10045H */ - msleep(100); - -- return tda1004x_check_upload_ok(state, 0x2c); -+ return tda1004x_check_upload_ok(state); - } - --static int tda10046_get_fw_version(struct tda1004x_state *state, -- const struct firmware *fw) -+static void tda10046_init_plls(struct dvb_frontend* 
fe) - { -- const unsigned char pattern[] = { 0x67, 0x00, 0x50, 0x62, 0x5e, 0x18, 0x67 }; -- unsigned int i; -+ struct tda1004x_state* state = fe->demodulator_priv; -+ int tda10046_clk53m; - -- /* area guessed from firmware v20, v21 and v25 */ -- for (i = 0x660; i < 0x700; i++) { -- if (!memcmp(&fw->data[i], pattern, sizeof(pattern))) { -- state->fw_version = fw->data[i + sizeof(pattern)]; -- printk(KERN_INFO "tda1004x: using firmware v%02x\n", -- state->fw_version); -- return 0; -- } -- } -+ if ((state->config->if_freq == TDA10046_FREQ_045) || -+ (state->config->if_freq == TDA10046_FREQ_052)) -+ tda10046_clk53m = 0; -+ else -+ tda10046_clk53m = 1; - -- return -EINVAL; -+ tda1004x_write_byteI(state, TDA10046H_CONFPLL1, 0xf0); -+ if(tda10046_clk53m) { -+ printk(KERN_INFO "tda1004x: setting up plls for 53MHz sampling clock\n"); -+ tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x08); // PLL M = 8 -+ } else { -+ printk(KERN_INFO "tda1004x: setting up plls for 48MHz sampling clock\n"); -+ tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x03); // PLL M = 3 -+ } -+ if (state->config->xtal_freq == TDA10046_XTAL_4M ) { -+ dprintk("%s: setting up PLLs for a 4 MHz Xtal\n", __FUNCTION__); -+ tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 0); // PLL P = N = 0 -+ } else { -+ dprintk("%s: setting up PLLs for a 16 MHz Xtal\n", __FUNCTION__); -+ tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 3); // PLL P = 0, N = 3 -+ } -+ if(tda10046_clk53m) -+ tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x67); -+ else -+ tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x72); -+ /* Note clock frequency is handled implicitly */ -+ switch (state->config->if_freq) { -+ case TDA10046_FREQ_045: -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00); -+ break; -+ case TDA10046_FREQ_052: -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xc7); -+ break; 
-+ case TDA10046_FREQ_3617: -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x59); -+ break; -+ case TDA10046_FREQ_3613: -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7); -+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x3f); -+ break; -+ } -+ tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz -+ /* let the PLLs settle */ -+ msleep(120); - } - - static int tda10046_fwupload(struct dvb_frontend* fe) - { - struct tda1004x_state* state = fe->demodulator_priv; -- unsigned long timeout; - int ret; - const struct firmware *fw; - - /* reset + wake up chip */ -- tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 0); -+ tda1004x_write_byteI(state, TDA1004X_CONFC4, 0); - tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0); -- msleep(100); -+ /* let the clocks recover from sleep */ -+ msleep(5); -+ -+ /* The PLLs need to be reprogrammed after sleep */ -+ tda10046_init_plls(fe); - - /* don't re-upload unless necessary */ -- if (tda1004x_check_upload_ok(state, state->fw_version) == 0) -+ if (tda1004x_check_upload_ok(state) == 0) - return 0; - -- /* request the firmware, this will block until someone uploads it */ -- printk("tda1004x: waiting for firmware upload (%s)...\n", TDA10046_DEFAULT_FIRMWARE); -- ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE); -- if (ret) { -- printk("tda1004x: no firmware upload (timeout or file not found?)\n"); -- return ret; -- } -- -- if (fw->size < 24478) { /* size of firmware v20, which is the smallest of v20, v21 and v25 */ -- printk("tda1004x: firmware file seems to be too small (%d bytes)\n", fw->size); -- return -EINVAL; -- } -- -- ret = tda10046_get_fw_version(state, fw); -- if (ret < 0) { -- printk("tda1004x: unable to find firmware version\n"); -- return ret; -- } -- -- /* set parameters */ -- tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 10); -- tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 
state->config->n_i2c); -- tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 99); -- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd4); -- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x2c); -- tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST -- -- ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN); -- if (ret) -- return ret; -- printk("tda1004x: firmware upload complete\n"); -- -- /* wait for DSP to initialise */ -- timeout = jiffies + HZ; -- while (!(tda1004x_read_byte(state, TDA1004X_STATUS_CD) & 0x20)) { -- if (time_after(jiffies, timeout)) { -- printk("tda1004x: DSP failed to initialised.\n"); -- return -EIO; -+ if (state->config->request_firmware != NULL) { -+ /* request the firmware, this will block until someone uploads it */ -+ printk(KERN_INFO "tda1004x: waiting for firmware upload...\n"); -+ ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE); -+ if (ret) { -+ printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n"); -+ return ret; - } -- msleep(1); -+ tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST -+ ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN); -+ release_firmware(fw); -+ if (ret) -+ return ret; -+ } else { -+ /* boot from firmware eeprom */ -+ printk(KERN_INFO "tda1004x: booting from eeprom\n"); -+ tda1004x_write_mask(state, TDA1004X_CONFC4, 4, 4); -+ msleep(300); - } -- -- return tda1004x_check_upload_ok(state, state->fw_version); -+ return tda1004x_check_upload_ok(state); - } - - static int tda1004x_encode_fec(int fec) -@@ -560,45 +634,54 @@ - - if (tda10046_fwupload(fe)) { - printk("tda1004x: firmware upload failed\n"); -- return -EIO; -+ return -EIO; - } - -- tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 0); // wake up the chip -- -- // Init the PLL -+ // Init the tuner PLL - if (state->config->pll_init) { - tda1004x_enable_tuner_i2c(state); 
-- state->config->pll_init(fe); -+ if (state->config->pll_init(fe)) { -+ printk(KERN_ERR "tda1004x: pll init failed\n"); -+ return -EIO; -+ } - tda1004x_disable_tuner_i2c(state); - } - - // tda setup - tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer -- tda1004x_write_mask(state, TDA1004X_CONFC1, 0x40, 0x40); -- tda1004x_write_mask(state, TDA1004X_AUTO, 8, 0); // select HP stream -- tda1004x_write_mask(state, TDA1004X_CONFC1, 0x80, 0); // disable pulse killer -- tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 10); // PLL M = 10 -- tda1004x_write_byteI(state, TDA10046H_CONFPLL3, state->config->n_i2c); // PLL P = N = 0 -- tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 99); // FREQOFFS = 99 -- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd4); // } PHY2 = -11221 -- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x2c); // } -- tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0); // AGC setup -- tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x60, 0x60); // set AGC polarities -+ tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87); // 100 ppm crystal, select HP stream -+ tda1004x_write_byteI(state, TDA1004X_CONFC1, 8); // disable pulse killer -+ -+ switch (state->config->agc_config) { -+ case TDA10046_AGC_DEFAULT: -+ tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup -+ tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities -+ break; -+ case TDA10046_AGC_IFO_AUTO_NEG: -+ tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup -+ tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities -+ break; -+ case TDA10046_AGC_IFO_AUTO_POS: -+ tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup -+ tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x00); // set AGC polarities -+ break; -+ case TDA10046_AGC_TDA827X: -+ tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup -+ tda1004x_write_byteI(state, 
TDA10046H_AGC_THR, 0x70); // AGC Threshold -+ tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize -+ tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities -+ break; -+ } -+ tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38); -+ tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on - tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0); // } - tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values - tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0); // } - tda1004x_write_byteI(state, TDA10046H_AGC_IF_MAX, 0xff); // } -- tda1004x_write_mask(state, TDA10046H_CVBER_CTRL, 0x30, 0x10); // 10^6 VBER measurement bits -- tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 1); // IF gain 2, TUN gain 1 -- tda1004x_write_mask(state, TDA1004X_AUTO, 0x80, 0); // crystal is 50ppm -+ tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 0x12); // IF gain 2, TUN gain 1 -+ tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits - tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config -- tda1004x_write_mask(state, TDA1004X_CONF_TS2, 0x31, 0); // MPEG2 interface config -- tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0x9e, 0); // disable AGC_TUN -- tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE2, 0xe1); // tristate setup -- tda1004x_write_byteI(state, TDA10046H_GPIO_OUT_SEL, 0xcc); // GPIO output config -- tda1004x_write_mask(state, TDA10046H_GPIO_SELECT, 8, 8); // GPIO select -- tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz -- -+ tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config - tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7); - - state->initialised = 1; -@@ -626,12 +709,12 @@ - - // set frequency - tda1004x_enable_tuner_i2c(state); -- state->config->pll_set(fe, fe_params); -+ if (state->config->pll_set(fe, fe_params)) { -+ 
printk(KERN_ERR "tda1004x: pll set failed\n"); -+ return -EIO; -+ } - tda1004x_disable_tuner_i2c(state); - -- if (state->demod_type == TDA1004X_DEMOD_TDA10046) -- tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 4); -- - // Hardcoded to use auto as much as possible on the TDA10045 as it - // is very unreliable if AUTO mode is _not_ used. - if (state->demod_type == TDA1004X_DEMOD_TDA10045) { -@@ -642,9 +725,9 @@ - - // Set standard params.. or put them to auto - if ((fe_params->u.ofdm.code_rate_HP == FEC_AUTO) || -- (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) || -- (fe_params->u.ofdm.constellation == QAM_AUTO) || -- (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) { -+ (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) || -+ (fe_params->u.ofdm.constellation == QAM_AUTO) || -+ (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) { - tda1004x_write_mask(state, TDA1004X_AUTO, 1, 1); // enable auto - tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0); // turn off constellation bits - tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0); // turn off hierarchy bits -@@ -794,6 +877,8 @@ - - case TDA1004X_DEMOD_TDA10046: - tda1004x_write_mask(state, TDA1004X_AUTO, 0x40, 0x40); -+ msleep(1); -+ tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 1); - break; - } - -@@ -805,6 +890,7 @@ - static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params) - { - struct tda1004x_state* state = fe->demodulator_priv; -+ - dprintk("%s\n", __FUNCTION__); - - // inversion status -@@ -829,16 +915,18 @@ - break; - } - break; -- - case TDA1004X_DEMOD_TDA10046: - switch (tda1004x_read_byte(state, TDA10046H_TIME_WREF1)) { -- case 0x60: -+ case 0x5c: -+ case 0x54: - fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; - break; -- case 0x6e: -+ case 0x6a: -+ case 0x60: - fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; - break; -- case 0x80: -+ case 0x7b: -+ case 0x70: - fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; - break; - } -@@ -1012,8 +1100,7 @@ 
- tmp = tda1004x_read_byte(state, TDA1004X_SNR); - if (tmp < 0) - return -EIO; -- if (tmp) -- tmp = 255 - tmp; -+ tmp = 255 - tmp; - - *snr = ((tmp << 8) | tmp); - dprintk("%s: snr=0x%x\n", __FUNCTION__, *snr); -@@ -1089,6 +1176,16 @@ - break; - - case TDA1004X_DEMOD_TDA10046: -+ if (state->config->pll_sleep != NULL) { -+ tda1004x_enable_tuner_i2c(state); -+ state->config->pll_sleep(fe); -+ if (state->config->if_freq != TDA10046_FREQ_052) { -+ /* special hack for Philips EUROPA Based boards: -+ * keep the I2c bridge open for tuner access in analog mode -+ */ -+ tda1004x_disable_tuner_i2c(state); -+ } -+ } - tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1); - break; - } -@@ -1100,8 +1197,9 @@ - static int tda1004x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) - { - fesettings->min_delay_ms = 800; -- fesettings->step_size = 166667; -- fesettings->max_drift = 166667*2; -+ /* Drift compensation makes no sense for DVB-T */ -+ fesettings->step_size = 0; -+ fesettings->max_drift = 0; - return 0; - } - -@@ -1216,7 +1314,6 @@ - memcpy(&state->ops, &tda10046_ops, sizeof(struct dvb_frontend_ops)); - state->initialised = 0; - state->demod_type = TDA1004X_DEMOD_TDA10046; -- state->fw_version = 0x20; /* dummy default value */ - - /* check if the demod is there */ - if (tda1004x_read_byte(state, TDA1004X_CHIPID) != 0x46) { -diff -Naur linux-2.6.12.6/drivers/media/dvb/frontends/tda1004x.h linux-2.6.12.6-patched/drivers/media/dvb/frontends/tda1004x.h ---- linux-2.6.12.6/drivers/media/dvb/frontends/tda1004x.h 2005-08-29 18:55:27.000000000 +0200 -+++ linux-2.6.12.6-patched/drivers/media/dvb/frontends/tda1004x.h 2006-04-11 14:31:37.000000000 +0200 -@@ -26,6 +26,25 @@ - #include <linux/dvb/frontend.h> - #include <linux/firmware.h> - -+enum tda10046_xtal { -+ TDA10046_XTAL_4M, -+ TDA10046_XTAL_16M, -+}; -+ -+enum tda10046_agc { -+ TDA10046_AGC_DEFAULT, /* original configuration */ -+ TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, 
negtive */ -+ TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */ -+ TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */ -+}; -+ -+enum tda10046_if { -+ TDA10046_FREQ_3617, /* original config, 36,166 MHZ */ -+ TDA10046_FREQ_3613, /* 36,13 MHZ */ -+ TDA10046_FREQ_045, /* low IF, 4.0, 4.5, or 5.0 MHZ */ -+ TDA10046_FREQ_052, /* low IF, 5.1667 MHZ for tda9889 */ -+}; -+ - struct tda1004x_config - { - /* the demodulator's i2c address */ -@@ -37,14 +56,22 @@ - /* Does the OCLK signal need inverted? */ - u8 invert_oclk; - -- /* value of N_I2C of the CONF_PLL3 register */ -- u8 n_i2c; -+ /* Xtal frequency, 4 or 16MHz*/ -+ enum tda10046_xtal xtal_freq; -+ -+ /* IF frequency */ -+ enum tda10046_if if_freq; -+ -+ /* AGC configuration */ -+ enum tda10046_agc agc_config; - - /* PLL maintenance */ - int (*pll_init)(struct dvb_frontend* fe); -+ void (*pll_sleep)(struct dvb_frontend* fe); - int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params); - - /* request firmware for device */ -+ /* set this to NULL if the card has a firmware EEPROM */ - int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name); - }; - diff --git a/packages/linux/linux-dm800/linuxmips-2.6.12-dream-r6.patch b/packages/linux/linux-dm800/linuxmips-2.6.12-dream-r6.patch deleted file mode 100644 index 3cee581e7c..0000000000 --- a/packages/linux/linux-dm800/linuxmips-2.6.12-dream-r6.patch +++ /dev/null @@ -1,4785 +0,0 @@ -diff -Naur 2.6.12-5.0-org/arch/mips/kernel/gdb-low.S 2.6.12-5.0-patched/arch/mips/kernel/gdb-low.S ---- 2.6.12-5.0-org/arch/mips/kernel/gdb-low.S 2007-07-26 00:51:07.000000000 +0200 -+++ 2.6.12-5.0-patched/arch/mips/kernel/gdb-low.S 2007-12-11 12:34:52.000000000 +0100 -@@ -52,12 +52,14 @@ - /* - * Called from user mode, go somewhere else. 
- */ -- lui k1, %hi(saved_vectors) - mfc0 k0, CP0_CAUSE - andi k0, k0, 0x7c -- add k1, k1, k0 -- lw k0, %lo(saved_vectors)(k1) -- jr k0 -+ -+#ifdef CONFIG_MIPS64 -+ dsll k0, k0, 1 -+#endif -+ lw k1, %lo(saved_vectors)(k0) -+ jr k1 - nop - 1: - move k0, sp -diff -Naur 2.6.12-5.0-org/arch/mips/kernel/reset.c 2.6.12-5.0-patched/arch/mips/kernel/reset.c ---- 2.6.12-5.0-org/arch/mips/kernel/reset.c 2007-07-26 00:51:08.000000000 +0200 -+++ 2.6.12-5.0-patched/arch/mips/kernel/reset.c 2007-12-11 12:34:52.000000000 +0100 -@@ -27,6 +27,7 @@ - } - - EXPORT_SYMBOL(machine_restart); -+EXPORT_SYMBOL(_machine_restart); - - void machine_halt(void) - { -@@ -34,6 +35,7 @@ - } - - EXPORT_SYMBOL(machine_halt); -+EXPORT_SYMBOL(_machine_halt); - - void machine_power_off(void) - { -@@ -41,3 +43,4 @@ - } - - EXPORT_SYMBOL(machine_power_off); -+EXPORT_SYMBOL(_machine_power_off); -diff -Naur 2.6.12-5.0-org/arch/mips/kernel/scall32-o32.S 2.6.12-5.0-patched/arch/mips/kernel/scall32-o32.S ---- 2.6.12-5.0-org/arch/mips/kernel/scall32-o32.S 2007-07-26 00:51:08.000000000 +0200 -+++ 2.6.12-5.0-patched/arch/mips/kernel/scall32-o32.S 2007-12-11 12:34:52.000000000 +0100 -@@ -594,7 +594,7 @@ - sys sys_remap_file_pages 5 - sys sys_set_tid_address 1 - sys sys_restart_syscall 0 -- sys sys_fadvise64_64 7 -+ sys mips_fadvise64 7 - sys sys_statfs64 3 /* 4255 */ - sys sys_fstatfs64 2 - sys sys_timer_create 3 -@@ -624,6 +624,8 @@ - sys sys_request_key 4 - sys sys_keyctl 5 - sys sys_set_thread_area 1 -+ sys sys_ioprio_set 3 -+ sys sys_ioprio_get 2 - - .endm - -diff -Naur 2.6.12-5.0-org/arch/mips/kernel/syscall.c 2.6.12-5.0-patched/arch/mips/kernel/syscall.c ---- 2.6.12-5.0-org/arch/mips/kernel/syscall.c 2007-07-26 00:51:09.000000000 +0200 -+++ 2.6.12-5.0-patched/arch/mips/kernel/syscall.c 2007-12-11 12:34:52.000000000 +0100 -@@ -405,6 +405,13 @@ - } - } - -+asmlinkage long mips_fadvise64(int fd, -+ unsigned int low_off, unsigned int high_off, -+ unsigned int len, int advice, unsigned int dummy) -+{ -+ return 
sys_fadvise64_64(fd, (((u64)high_off) << 32) | low_off, (u64)len, advice); -+} -+ - /* - * No implemented yet ... - */ -diff -Naur 2.6.12-5.0-org/Documentation/block/ioprio.txt 2.6.12-5.0-patched/Documentation/block/ioprio.txt ---- 2.6.12-5.0-org/Documentation/block/ioprio.txt 1970-01-01 01:00:00.000000000 +0100 -+++ 2.6.12-5.0-patched/Documentation/block/ioprio.txt 2007-12-11 12:34:52.000000000 +0100 -@@ -0,0 +1,179 @@ -+Block io priorities -+=================== -+ -+ -+Intro -+----- -+ -+With the introduction of cfq v3 (aka cfq-ts or time sliced cfq), basic io -+priorities is supported for reads on files. This enables users to io nice -+processes or process groups, similar to what has been possible to cpu -+scheduling for ages. This document mainly details the current possibilites -+with cfq, other io schedulers do not support io priorities so far. -+ -+Scheduling classes -+------------------ -+ -+CFQ implements three generic scheduling classes that determine how io is -+served for a process. -+ -+IOPRIO_CLASS_RT: This is the realtime io class. This scheduling class is given -+higher priority than any other in the system, processes from this class are -+given first access to the disk every time. Thus it needs to be used with some -+care, one io RT process can starve the entire system. Within the RT class, -+there are 8 levels of class data that determine exactly how much time this -+process needs the disk for on each service. In the future this might change -+to be more directly mappable to performance, by passing in a wanted data -+rate instead. -+ -+IOPRIO_CLASS_BE: This is the best-effort scheduling class, which is the default -+for any process that hasn't set a specific io priority. The class data -+determines how much io bandwidth the process will get, it's directly mappable -+to the cpu nice levels just more coarsely implemented. 0 is the highest -+BE prio level, 7 is the lowest. 
The mapping between cpu nice level and io -+nice level is determined as: io_nice = (cpu_nice + 20) / 5. -+ -+IOPRIO_CLASS_IDLE: This is the idle scheduling class, processes running at this -+level only get io time when no one else needs the disk. The idle class has no -+class data, since it doesn't really apply here. -+ -+Tools -+----- -+ -+See below for a sample ionice tool. Usage: -+ -+# ionice -c<class> -n<level> -p<pid> -+ -+If pid isn't given, the current process is assumed. IO priority settings -+are inherited on fork, so you can use ionice to start the process at a given -+level: -+ -+# ionice -c2 -n0 /bin/ls -+ -+will run ls at the best-effort scheduling class at the highest priority. -+For a running process, you can give the pid instead: -+ -+# ionice -c1 -n2 -p100 -+ -+will change pid 100 to run at the realtime scheduling class, at priority 2. -+ -+---> snip ionice.c tool <--- -+ -+#include <stdio.h> -+#include <stdlib.h> -+#include <errno.h> -+#include <getopt.h> -+#include <unistd.h> -+#include <sys/ptrace.h> -+#include <asm/unistd.h> -+ -+extern int sys_ioprio_set(int, int, int); -+extern int sys_ioprio_get(int, int); -+ -+#if defined(__i386__) -+#define __NR_ioprio_set 289 -+#define __NR_ioprio_get 290 -+#elif defined(__ppc__) -+#define __NR_ioprio_set 273 -+#define __NR_ioprio_get 274 -+#elif defined(__x86_64__) -+#define __NR_ioprio_set 251 -+#define __NR_ioprio_get 252 -+#elif defined(__ia64__) -+#define __NR_ioprio_set 1274 -+#define __NR_ioprio_get 1275 -+#elif defined(__mips__) -+#define __NR_ioprio_set 4284 -+#define __NR_ioprio_get 4285 -+#else -+#error "Unsupported arch" -+#endif -+ -+_syscall3(int, ioprio_set, int, which, int, who, int, ioprio); -+_syscall2(int, ioprio_get, int, which, int, who); -+ -+enum { -+ IOPRIO_CLASS_NONE, -+ IOPRIO_CLASS_RT, -+ IOPRIO_CLASS_BE, -+ IOPRIO_CLASS_IDLE, -+}; -+ -+enum { -+ IOPRIO_WHO_PROCESS = 1, -+ IOPRIO_WHO_PGRP, -+ IOPRIO_WHO_USER, -+}; -+ -+#define IOPRIO_CLASS_SHIFT 13 -+ -+const char *to_prio[] = 
{ "none", "realtime", "best-effort", "idle", }; -+ -+int main(int argc, char *argv[]) -+{ -+ int ioprio = 4, set = 0, ioprio_class = IOPRIO_CLASS_BE; -+ int c, pid = 0; -+ -+ while ((c = getopt(argc, argv, "+n:c:p:")) != EOF) { -+ switch (c) { -+ case 'n': -+ ioprio = strtol(optarg, NULL, 10); -+ set = 1; -+ break; -+ case 'c': -+ ioprio_class = strtol(optarg, NULL, 10); -+ set = 1; -+ break; -+ case 'p': -+ pid = strtol(optarg, NULL, 10); -+ break; -+ } -+ } -+ -+ switch (ioprio_class) { -+ case IOPRIO_CLASS_NONE: -+ ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_RT: -+ case IOPRIO_CLASS_BE: -+ break; -+ case IOPRIO_CLASS_IDLE: -+ ioprio = 7; -+ break; -+ default: -+ printf("bad prio class %d\n", ioprio_class); -+ return 1; -+ } -+ -+ if (!set) { -+ if (!pid && argv[optind]) -+ pid = strtol(argv[optind], NULL, 10); -+ -+ ioprio = ioprio_get(IOPRIO_WHO_PROCESS, pid); -+ -+ printf("pid=%d, %d\n", pid, ioprio); -+ -+ if (ioprio == -1) -+ perror("ioprio_get"); -+ else { -+ ioprio_class = ioprio >> IOPRIO_CLASS_SHIFT; -+ ioprio = ioprio & 0xff; -+ printf("%s: prio %d\n", to_prio[ioprio_class], ioprio); -+ } -+ } else { -+ if (ioprio_set(IOPRIO_WHO_PROCESS, pid, ioprio | ioprio_class << IOPRIO_CLASS_SHIFT) == -1) { -+ perror("ioprio_set"); -+ return 1; -+ } -+ -+ if (argv[optind]) -+ execvp(argv[optind], &argv[optind]); -+ } -+ -+ return 0; -+} -+ -+---> snip ionice.c tool <--- -+ -+ -+March 11 2005, Jens Axboe <axboe@suse.de> -diff -Naur 2.6.12-5.0-org/drivers/block/as-iosched.c 2.6.12-5.0-patched/drivers/block/as-iosched.c ---- 2.6.12-5.0-org/drivers/block/as-iosched.c 2007-07-26 00:53:20.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/block/as-iosched.c 2007-12-11 12:34:52.000000000 +0100 -@@ -1806,7 +1806,8 @@ - rq->elevator_private = NULL; - } - --static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) -+static int as_set_request(request_queue_t *q, struct request *rq, -+ struct bio *bio, int gfp_mask) - { - struct as_data 
*ad = q->elevator->elevator_data; - struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); -@@ -1827,7 +1828,7 @@ - return 1; - } - --static int as_may_queue(request_queue_t *q, int rw) -+static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) - { - int ret = ELV_MQUEUE_MAY; - struct as_data *ad = q->elevator->elevator_data; -diff -Naur 2.6.12-5.0-org/drivers/block/cfq-iosched.c 2.6.12-5.0-patched/drivers/block/cfq-iosched.c ---- 2.6.12-5.0-org/drivers/block/cfq-iosched.c 2007-07-26 00:53:20.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/block/cfq-iosched.c 2007-12-11 12:34:52.000000000 +0100 -@@ -21,22 +21,34 @@ - #include <linux/hash.h> - #include <linux/rbtree.h> - #include <linux/mempool.h> -- --static unsigned long max_elapsed_crq; --static unsigned long max_elapsed_dispatch; -+#include <linux/ioprio.h> -+#include <linux/writeback.h> - - /* - * tunables - */ - static int cfq_quantum = 4; /* max queue in one round of service */ - static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ --static int cfq_service = HZ; /* period over which service is avg */ --static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */ --static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */ --static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */ -+static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; - static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ - static int cfq_back_penalty = 2; /* penalty of a backwards seek */ - -+static int cfq_slice_sync = HZ / 10; -+static int cfq_slice_async = HZ / 25; -+static int cfq_slice_async_rq = 2; -+static int cfq_slice_idle = HZ / 100; -+ -+#define CFQ_IDLE_GRACE (HZ / 10) -+#define CFQ_SLICE_SCALE (5) -+ -+#define CFQ_KEY_ASYNC (0) -+#define CFQ_KEY_ANY (0xffff) -+ -+/* -+ * disable queueing at the driver/hardware level -+ */ -+static int cfq_max_depth = 2; -+ - /* - * for the hash of cfqq inside the cfqd - */ -@@ -55,6 +67,7 @@ - #define 
list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) - - #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) -+#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) - - #define RQ_DATA(rq) (rq)->elevator_private - -@@ -75,78 +88,110 @@ - #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) - #define rq_rb_key(rq) (rq)->sector - --/* -- * threshold for switching off non-tag accounting -- */ --#define CFQ_MAX_TAG (4) -- --/* -- * sort key types and names -- */ --enum { -- CFQ_KEY_PGID, -- CFQ_KEY_TGID, -- CFQ_KEY_UID, -- CFQ_KEY_GID, -- CFQ_KEY_LAST, --}; -- --static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL }; -- - static kmem_cache_t *crq_pool; - static kmem_cache_t *cfq_pool; - static kmem_cache_t *cfq_ioc_pool; - -+#define CFQ_PRIO_LISTS IOPRIO_BE_NR -+#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) -+#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) -+#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) -+ -+#define ASYNC (0) -+#define SYNC (1) -+ -+#define cfq_cfqq_dispatched(cfqq) \ -+ ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC]) -+ -+#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC) -+ -+#define cfq_cfqq_sync(cfqq) \ -+ (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) -+ -+/* -+ * Per block device queue structure -+ */ - struct cfq_data { -- struct list_head rr_list; -+ atomic_t ref; -+ request_queue_t *queue; -+ -+ /* -+ * rr list of queues with requests and the count of them -+ */ -+ struct list_head rr_list[CFQ_PRIO_LISTS]; -+ struct list_head busy_rr; -+ struct list_head cur_rr; -+ struct list_head idle_rr; -+ unsigned int busy_queues; -+ -+ /* -+ * non-ordered list of empty cfqq's -+ */ - struct list_head empty_list; - -+ /* -+ * cfqq lookup hash -+ */ - struct hlist_head *cfq_hash; -- struct hlist_head *crq_hash; - -- /* queues on rr_list (ie they have pending requests */ -- 
unsigned int busy_queues; -+ /* -+ * global crq hash for all queues -+ */ -+ struct hlist_head *crq_hash; - - unsigned int max_queued; - -- atomic_t ref; -+ mempool_t *crq_pool; - -- int key_type; -+ int rq_in_driver; - -- mempool_t *crq_pool; -+ /* -+ * schedule slice state info -+ */ -+ /* -+ * idle window management -+ */ -+ struct timer_list idle_slice_timer; -+ struct work_struct unplug_work; - -- request_queue_t *queue; -+ struct cfq_queue *active_queue; -+ struct cfq_io_context *active_cic; -+ int cur_prio, cur_end_prio; -+ unsigned int dispatch_slice; -+ -+ struct timer_list idle_class_timer; - - sector_t last_sector; -+ unsigned long last_end_request; - -- int rq_in_driver; -+ unsigned int rq_starved; - - /* - * tunables, see top of file - */ - unsigned int cfq_quantum; - unsigned int cfq_queued; -- unsigned int cfq_fifo_expire_r; -- unsigned int cfq_fifo_expire_w; -- unsigned int cfq_fifo_batch_expire; -+ unsigned int cfq_fifo_expire[2]; - unsigned int cfq_back_penalty; - unsigned int cfq_back_max; -- unsigned int find_best_crq; -- -- unsigned int cfq_tagged; -+ unsigned int cfq_slice[2]; -+ unsigned int cfq_slice_async_rq; -+ unsigned int cfq_slice_idle; -+ unsigned int cfq_max_depth; - }; - -+/* -+ * Per process-grouping structure -+ */ - struct cfq_queue { - /* reference count */ - atomic_t ref; - /* parent cfq_data */ - struct cfq_data *cfqd; -- /* hash of mergeable requests */ -+ /* cfqq lookup hash */ - struct hlist_node cfq_hash; - /* hash key */ -- unsigned long key; -- /* whether queue is on rr (or empty) list */ -- int on_rr; -+ unsigned int key; - /* on either rr or empty list of cfqd */ - struct list_head cfq_list; - /* sorted list of pending requests */ -@@ -158,21 +203,22 @@ - /* currently allocated requests */ - int allocated[2]; - /* fifo list of requests in sort_list */ -- struct list_head fifo[2]; -- /* last time fifo expired */ -- unsigned long last_fifo_expire; -+ struct list_head fifo; - -- int key_type; -+ unsigned long slice_start; 
-+ unsigned long slice_end; -+ unsigned long slice_left; -+ unsigned long service_last; -+ -+ /* number of requests that are on the dispatch list */ -+ int on_dispatch[2]; -+ -+ /* io prio of this group */ -+ unsigned short ioprio, org_ioprio; -+ unsigned short ioprio_class, org_ioprio_class; - -- unsigned long service_start; -- unsigned long service_used; -- -- unsigned int max_rate; -- -- /* number of requests that have been handed to the driver */ -- int in_flight; -- /* number of currently allocated requests */ -- int alloc_limit[2]; -+ /* various state flags, see below */ -+ unsigned int flags; - }; - - struct cfq_rq { -@@ -184,42 +230,78 @@ - struct cfq_queue *cfq_queue; - struct cfq_io_context *io_context; - -- unsigned long service_start; -- unsigned long queue_start; -+ unsigned int crq_flags; -+}; -+ -+enum cfqq_state_flags { -+ CFQ_CFQQ_FLAG_on_rr = 0, -+ CFQ_CFQQ_FLAG_wait_request, -+ CFQ_CFQQ_FLAG_must_alloc, -+ CFQ_CFQQ_FLAG_must_alloc_slice, -+ CFQ_CFQQ_FLAG_must_dispatch, -+ CFQ_CFQQ_FLAG_fifo_expire, -+ CFQ_CFQQ_FLAG_idle_window, -+ CFQ_CFQQ_FLAG_prio_changed, -+ CFQ_CFQQ_FLAG_expired, -+}; -+ -+#define CFQ_CFQQ_FNS(name) \ -+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ -+{ \ -+ cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ -+} \ -+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ -+{ \ -+ cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ -+} \ -+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ -+{ \ -+ return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ -+} - -- unsigned int in_flight : 1; -- unsigned int accounted : 1; -- unsigned int is_sync : 1; -- unsigned int is_write : 1; -+CFQ_CFQQ_FNS(on_rr); -+CFQ_CFQQ_FNS(wait_request); -+CFQ_CFQQ_FNS(must_alloc); -+CFQ_CFQQ_FNS(must_alloc_slice); -+CFQ_CFQQ_FNS(must_dispatch); -+CFQ_CFQQ_FNS(fifo_expire); -+CFQ_CFQQ_FNS(idle_window); -+CFQ_CFQQ_FNS(prio_changed); -+CFQ_CFQQ_FNS(expired); -+#undef CFQ_CFQQ_FNS -+ -+enum cfq_rq_state_flags { -+ 
CFQ_CRQ_FLAG_in_flight = 0, -+ CFQ_CRQ_FLAG_in_driver, -+ CFQ_CRQ_FLAG_is_sync, -+ CFQ_CRQ_FLAG_requeued, - }; - --static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long); -+#define CFQ_CRQ_FNS(name) \ -+static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \ -+{ \ -+ crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \ -+} \ -+static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \ -+{ \ -+ crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \ -+} \ -+static inline int cfq_crq_##name(const struct cfq_rq *crq) \ -+{ \ -+ return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ -+} -+ -+CFQ_CRQ_FNS(in_flight); -+CFQ_CRQ_FNS(in_driver); -+CFQ_CRQ_FNS(is_sync); -+CFQ_CRQ_FNS(requeued); -+#undef CFQ_CRQ_FNS -+ -+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); - static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); --static void cfq_update_next_crq(struct cfq_rq *); - static void cfq_put_cfqd(struct cfq_data *cfqd); - --/* -- * what the fairness is based on (ie how processes are grouped and -- * differentiated) -- */ --static inline unsigned long --cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk) --{ -- /* -- * optimize this so that ->key_type is the offset into the struct -- */ -- switch (cfqd->key_type) { -- case CFQ_KEY_PGID: -- return process_group(tsk); -- default: -- case CFQ_KEY_TGID: -- return tsk->tgid; -- case CFQ_KEY_UID: -- return tsk->uid; -- case CFQ_KEY_GID: -- return tsk->gid; -- } --} -+#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) - - /* - * lots of deadline iosched dupes, can be abstracted later... 
-@@ -235,16 +317,12 @@ - - if (q->last_merge == crq->request) - q->last_merge = NULL; -- -- cfq_update_next_crq(crq); - } - - static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) - { - const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); - -- BUG_ON(!hlist_unhashed(&crq->hash)); -- - hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); - } - -@@ -257,8 +335,6 @@ - struct cfq_rq *crq = list_entry_hash(entry); - struct request *__rq = crq->request; - -- BUG_ON(hlist_unhashed(&crq->hash)); -- - if (!rq_mergeable(__rq)) { - cfq_del_crq_hash(crq); - continue; -@@ -271,6 +347,28 @@ - return NULL; - } - -+static inline int cfq_pending_requests(struct cfq_data *cfqd) -+{ -+ return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues; -+} -+ -+/* -+ * scheduler run of queue, if there are requests pending and no one in the -+ * driver that will restart queueing -+ */ -+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) -+{ -+ if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd)) -+ kblockd_schedule_work(&cfqd->unplug_work); -+} -+ -+static int cfq_queue_empty(request_queue_t *q) -+{ -+ struct cfq_data *cfqd = q->elevator->elevator_data; -+ -+ return !cfq_pending_requests(cfqd); -+} -+ - /* - * Lifted from AS - choose which of crq1 and crq2 that is best served now. - * We choose the request that is closest to the head right now. 
Distance -@@ -288,35 +386,21 @@ - if (crq2 == NULL) - return crq1; - -+ if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2)) -+ return crq1; -+ else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1)) -+ return crq2; -+ -+ if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) -+ return crq1; -+ else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) -+ return crq2; -+ - s1 = crq1->request->sector; - s2 = crq2->request->sector; - - last = cfqd->last_sector; - --#if 0 -- if (!list_empty(&cfqd->queue->queue_head)) { -- struct list_head *entry = &cfqd->queue->queue_head; -- unsigned long distance = ~0UL; -- struct request *rq; -- -- while ((entry = entry->prev) != &cfqd->queue->queue_head) { -- rq = list_entry_rq(entry); -- -- if (blk_barrier_rq(rq)) -- break; -- -- if (distance < abs(s1 - rq->sector + rq->nr_sectors)) { -- distance = abs(s1 - rq->sector +rq->nr_sectors); -- last = rq->sector + rq->nr_sectors; -- } -- if (distance < abs(s2 - rq->sector + rq->nr_sectors)) { -- distance = abs(s2 - rq->sector +rq->nr_sectors); -- last = rq->sector + rq->nr_sectors; -- } -- } -- } --#endif -- - /* - * by definition, 1KiB is 2 sectors - */ -@@ -377,11 +461,14 @@ - struct cfq_rq *crq_next = NULL, *crq_prev = NULL; - struct rb_node *rbnext, *rbprev; - -- if (!ON_RB(&last->rb_node)) -- return NULL; -- -- if ((rbnext = rb_next(&last->rb_node)) == NULL) -+ rbnext = NULL; -+ if (ON_RB(&last->rb_node)) -+ rbnext = rb_next(&last->rb_node); -+ if (!rbnext) { - rbnext = rb_first(&cfqq->sort_list); -+ if (rbnext == &last->rb_node) -+ rbnext = NULL; -+ } - - rbprev = rb_prev(&last->rb_node); - -@@ -401,67 +488,53 @@ - cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); - } - --static int cfq_check_sort_rr_list(struct cfq_queue *cfqq) -+static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) - { -- struct list_head *head = &cfqq->cfqd->rr_list; -- struct list_head *next, *prev; -- -- /* -- * list might still be ordered -- */ -- next = cfqq->cfq_list.next; -- 
if (next != head) { -- struct cfq_queue *cnext = list_entry_cfqq(next); -+ struct cfq_data *cfqd = cfqq->cfqd; -+ struct list_head *list, *entry; - -- if (cfqq->service_used > cnext->service_used) -- return 1; -- } -+ BUG_ON(!cfq_cfqq_on_rr(cfqq)); - -- prev = cfqq->cfq_list.prev; -- if (prev != head) { -- struct cfq_queue *cprev = list_entry_cfqq(prev); -+ list_del(&cfqq->cfq_list); - -- if (cfqq->service_used < cprev->service_used) -- return 1; -+ if (cfq_class_rt(cfqq)) -+ list = &cfqd->cur_rr; -+ else if (cfq_class_idle(cfqq)) -+ list = &cfqd->idle_rr; -+ else { -+ /* -+ * if cfqq has requests in flight, don't allow it to be -+ * found in cfq_set_active_queue before it has finished them. -+ * this is done to increase fairness between a process that -+ * has lots of io pending vs one that only generates one -+ * sporadically or synchronously -+ */ -+ if (cfq_cfqq_dispatched(cfqq)) -+ list = &cfqd->busy_rr; -+ else -+ list = &cfqd->rr_list[cfqq->ioprio]; - } - -- return 0; --} -- --static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) --{ -- struct list_head *entry = &cfqq->cfqd->rr_list; -- -- if (!cfqq->on_rr) -- return; -- if (!new_queue && !cfq_check_sort_rr_list(cfqq)) -+ /* -+ * if queue was preempted, just add to front to be fair. busy_rr -+ * isn't sorted. 
-+ */ -+ if (preempted || list == &cfqd->busy_rr) { -+ list_add(&cfqq->cfq_list, list); - return; -- -- list_del(&cfqq->cfq_list); -+ } - - /* -- * sort by our mean service_used, sub-sort by in-flight requests -+ * sort by when queue was last serviced - */ -- while ((entry = entry->prev) != &cfqq->cfqd->rr_list) { -+ entry = list; -+ while ((entry = entry->prev) != list) { - struct cfq_queue *__cfqq = list_entry_cfqq(entry); - -- if (cfqq->service_used > __cfqq->service_used) -+ if (!__cfqq->service_last) -+ break; -+ if (time_before(__cfqq->service_last, cfqq->service_last)) - break; -- else if (cfqq->service_used == __cfqq->service_used) { -- struct list_head *prv; -- -- while ((prv = entry->prev) != &cfqq->cfqd->rr_list) { -- __cfqq = list_entry_cfqq(prv); -- -- WARN_ON(__cfqq->service_used > cfqq->service_used); -- if (cfqq->service_used != __cfqq->service_used) -- break; -- if (cfqq->in_flight > __cfqq->in_flight) -- break; -- -- entry = prv; -- } -- } - } - - list_add(&cfqq->cfq_list, entry); -@@ -469,28 +542,24 @@ - - /* - * add to busy list of queues for service, trying to be fair in ordering -- * the pending list according to requests serviced -+ * the pending list according to last request service - */ - static inline void --cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) - { -- /* -- * it's currently on the empty list -- */ -- cfqq->on_rr = 1; -+ BUG_ON(cfq_cfqq_on_rr(cfqq)); -+ cfq_mark_cfqq_on_rr(cfqq); - cfqd->busy_queues++; - -- if (time_after(jiffies, cfqq->service_start + cfq_service)) -- cfqq->service_used >>= 3; -- -- cfq_sort_rr_list(cfqq, 1); -+ cfq_resort_rr_list(cfqq, requeue); - } - - static inline void - cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) - { -+ BUG_ON(!cfq_cfqq_on_rr(cfqq)); -+ cfq_clear_cfqq_on_rr(cfqq); - list_move(&cfqq->cfq_list, &cfqd->empty_list); -- cfqq->on_rr = 0; - - BUG_ON(!cfqd->busy_queues); - 
cfqd->busy_queues--; -@@ -505,16 +574,17 @@ - - if (ON_RB(&crq->rb_node)) { - struct cfq_data *cfqd = cfqq->cfqd; -+ const int sync = cfq_crq_is_sync(crq); - -- BUG_ON(!cfqq->queued[crq->is_sync]); -+ BUG_ON(!cfqq->queued[sync]); -+ cfqq->queued[sync]--; - - cfq_update_next_crq(crq); - -- cfqq->queued[crq->is_sync]--; - rb_erase(&crq->rb_node, &cfqq->sort_list); - RB_CLEAR_COLOR(&crq->rb_node); - -- if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr) -+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) - cfq_del_cfqq_rr(cfqd, cfqq); - } - } -@@ -550,7 +620,7 @@ - struct cfq_rq *__alias; - - crq->rb_key = rq_rb_key(rq); -- cfqq->queued[crq->is_sync]++; -+ cfqq->queued[cfq_crq_is_sync(crq)]++; - - /* - * looks a little odd, but the first insert might return an alias. -@@ -561,8 +631,8 @@ - - rb_insert_color(&crq->rb_node, &cfqq->sort_list); - -- if (!cfqq->on_rr) -- cfq_add_cfqq_rr(cfqd, cfqq); -+ if (!cfq_cfqq_on_rr(cfqq)) -+ cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq)); - - /* - * check if this request is a better next-serve candidate -@@ -575,17 +645,16 @@ - { - if (ON_RB(&crq->rb_node)) { - rb_erase(&crq->rb_node, &cfqq->sort_list); -- cfqq->queued[crq->is_sync]--; -+ cfqq->queued[cfq_crq_is_sync(crq)]--; - } - - cfq_add_crq_rb(crq); - } - --static struct request * --cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) -+static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) -+ - { -- const unsigned long key = cfq_hash_key(cfqd, current); -- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key); -+ struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); - struct rb_node *n; - - if (!cfqq) -@@ -609,20 +678,25 @@ - - static void cfq_deactivate_request(request_queue_t *q, struct request *rq) - { -+ struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_rq *crq = RQ_DATA(rq); - - if (crq) { - struct cfq_queue *cfqq = crq->cfq_queue; - -- if (cfqq->cfqd->cfq_tagged) { -- cfqq->service_used--; -- 
cfq_sort_rr_list(cfqq, 0); -+ if (cfq_crq_in_driver(crq)) { -+ cfq_clear_crq_in_driver(crq); -+ WARN_ON(!cfqd->rq_in_driver); -+ cfqd->rq_in_driver--; - } -+ if (cfq_crq_in_flight(crq)) { -+ const int sync = cfq_crq_is_sync(crq); - -- if (crq->accounted) { -- crq->accounted = 0; -- cfqq->cfqd->rq_in_driver--; -+ cfq_clear_crq_in_flight(crq); -+ WARN_ON(!cfqq->on_dispatch[sync]); -+ cfqq->on_dispatch[sync]--; - } -+ cfq_mark_crq_requeued(crq); - } - } - -@@ -640,11 +714,10 @@ - struct cfq_rq *crq = RQ_DATA(rq); - - if (crq) { -- cfq_remove_merge_hints(q, crq); - list_del_init(&rq->queuelist); -+ cfq_del_crq_rb(crq); -+ cfq_remove_merge_hints(q, crq); - -- if (crq->cfq_queue) -- cfq_del_crq_rb(crq); - } - } - -@@ -662,21 +735,15 @@ - } - - __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); -- if (__rq) { -- BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); -- -- if (elv_rq_merge_ok(__rq, bio)) { -- ret = ELEVATOR_BACK_MERGE; -- goto out; -- } -+ if (__rq && elv_rq_merge_ok(__rq, bio)) { -+ ret = ELEVATOR_BACK_MERGE; -+ goto out; - } - - __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); -- if (__rq) { -- if (elv_rq_merge_ok(__rq, bio)) { -- ret = ELEVATOR_FRONT_MERGE; -- goto out; -- } -+ if (__rq && elv_rq_merge_ok(__rq, bio)) { -+ ret = ELEVATOR_FRONT_MERGE; -+ goto out; - } - - return ELEVATOR_NO_MERGE; -@@ -709,235 +776,496 @@ - cfq_merged_requests(request_queue_t *q, struct request *rq, - struct request *next) - { -- struct cfq_rq *crq = RQ_DATA(rq); -- struct cfq_rq *cnext = RQ_DATA(next); -- - cfq_merged_request(q, rq); - -- if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { -- if (time_before(cnext->queue_start, crq->queue_start)) { -- list_move(&rq->queuelist, &next->queuelist); -- crq->queue_start = cnext->queue_start; -- } -- } -+ /* -+ * reposition in fifo if next is older than rq -+ */ -+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && -+ time_before(next->start_time, rq->start_time)) -+ 
list_move(&rq->queuelist, &next->queuelist); - -- cfq_update_next_crq(cnext); - cfq_remove_request(q, next); - } - -+static inline void -+__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+{ -+ if (cfqq) { -+ /* -+ * stop potential idle class queues waiting service -+ */ -+ del_timer(&cfqd->idle_class_timer); -+ -+ cfqq->slice_start = jiffies; -+ cfqq->slice_end = 0; -+ cfqq->slice_left = 0; -+ cfq_clear_cfqq_must_alloc_slice(cfqq); -+ cfq_clear_cfqq_fifo_expire(cfqq); -+ cfq_clear_cfqq_expired(cfqq); -+ } -+ -+ cfqd->active_queue = cfqq; -+} -+ - /* -- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues, -- * this function sector sorts the selected request to minimize seeks. we start -- * at cfqd->last_sector, not 0. -+ * 0 -+ * 0,1 -+ * 0,1,2 -+ * 0,1,2,3 -+ * 0,1,2,3,4 -+ * 0,1,2,3,4,5 -+ * 0,1,2,3,4,5,6 -+ * 0,1,2,3,4,5,6,7 - */ --static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) -+static int cfq_get_next_prio_level(struct cfq_data *cfqd) - { -- struct cfq_data *cfqd = q->elevator->elevator_data; -- struct cfq_queue *cfqq = crq->cfq_queue; -- struct list_head *head = &q->queue_head, *entry = head; -- struct request *__rq; -- sector_t last; -- -- cfq_del_crq_rb(crq); -- cfq_remove_merge_hints(q, crq); -- list_del(&crq->request->queuelist); -+ int prio, wrap; - -- last = cfqd->last_sector; -- while ((entry = entry->prev) != head) { -- __rq = list_entry_rq(entry); -+ prio = -1; -+ wrap = 0; -+ do { -+ int p; - -- if (blk_barrier_rq(crq->request)) -- break; -- if (!blk_fs_request(crq->request)) -- break; -+ for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { -+ if (!list_empty(&cfqd->rr_list[p])) { -+ prio = p; -+ break; -+ } -+ } - -- if (crq->request->sector > __rq->sector) -- break; -- if (__rq->sector > last && crq->request->sector < last) { -- last = crq->request->sector; -+ if (prio != -1) - break; -+ cfqd->cur_prio = 0; -+ if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { -+ cfqd->cur_end_prio = 
0; -+ if (wrap) -+ break; -+ wrap = 1; - } -- } -+ } while (1); - -- cfqd->last_sector = last; -- crq->in_flight = 1; -- cfqq->in_flight++; -- list_add(&crq->request->queuelist, entry); --} -+ if (unlikely(prio == -1)) -+ return -1; - --/* -- * return expired entry, or NULL to just start from scratch in rbtree -- */ --static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) --{ -- struct cfq_data *cfqd = cfqq->cfqd; -- const int reads = !list_empty(&cfqq->fifo[0]); -- const int writes = !list_empty(&cfqq->fifo[1]); -- unsigned long now = jiffies; -- struct cfq_rq *crq; -+ BUG_ON(prio >= CFQ_PRIO_LISTS); - -- if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire)) -- return NULL; -+ list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); - -- crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist)); -- if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) { -- cfqq->last_fifo_expire = now; -- return crq; -+ cfqd->cur_prio = prio + 1; -+ if (cfqd->cur_prio > cfqd->cur_end_prio) { -+ cfqd->cur_end_prio = cfqd->cur_prio; -+ cfqd->cur_prio = 0; - } -- -- crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist)); -- if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) { -- cfqq->last_fifo_expire = now; -- return crq; -+ if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { -+ cfqd->cur_prio = 0; -+ cfqd->cur_end_prio = 0; - } - -- return NULL; -+ return prio; - } - --/* -- * dispatch a single request from given queue -- */ --static inline void --cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd, -- struct cfq_queue *cfqq) -+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) - { -- struct cfq_rq *crq; -+ struct cfq_queue *cfqq; - - /* -- * follow expired path, else get first next available -+ * if current queue is expired but not done with its requests yet, -+ * wait for that to happen - */ -- if ((crq = cfq_check_fifo(cfqq)) == NULL) { -- if 
(cfqd->find_best_crq) -- crq = cfqq->next_crq; -- else -- crq = rb_entry_crq(rb_first(&cfqq->sort_list)); -+ if ((cfqq = cfqd->active_queue) != NULL) { -+ if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq)) -+ return NULL; - } - -- cfqd->last_sector = crq->request->sector + crq->request->nr_sectors; -+ /* -+ * if current list is non-empty, grab first entry. if it is empty, -+ * get next prio level and grab first entry then if any are spliced -+ */ -+ if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) -+ cfqq = list_entry_cfqq(cfqd->cur_rr.next); - - /* -- * finally, insert request into driver list -+ * if we have idle queues and no rt or be queues had pending -+ * requests, either allow immediate service if the grace period -+ * has passed or arm the idle grace timer - */ -- cfq_dispatch_sort(q, crq); -+ if (!cfqq && !list_empty(&cfqd->idle_rr)) { -+ unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; -+ -+ if (time_after_eq(jiffies, end)) -+ cfqq = list_entry_cfqq(cfqd->idle_rr.next); -+ else -+ mod_timer(&cfqd->idle_class_timer, end); -+ } -+ -+ __cfq_set_active_queue(cfqd, cfqq); -+ return cfqq; - } - --static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) -+/* -+ * current cfqq expired its slice (or was too idle), select new one -+ */ -+static void -+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, -+ int preempted) - { -- struct cfq_data *cfqd = q->elevator->elevator_data; -- struct cfq_queue *cfqq; -- struct list_head *entry, *tmp; -- int queued, busy_queues, first_round; -- -- if (list_empty(&cfqd->rr_list)) -- return 0; -+ unsigned long now = jiffies; - -- queued = 0; -- first_round = 1; --restart: -- busy_queues = 0; -- list_for_each_safe(entry, tmp, &cfqd->rr_list) { -- cfqq = list_entry_cfqq(entry); -+ if (cfq_cfqq_wait_request(cfqq)) -+ del_timer(&cfqd->idle_slice_timer); - -- BUG_ON(RB_EMPTY(&cfqq->sort_list)); -+ if (!preempted && !cfq_cfqq_dispatched(cfqq)) -+ cfqq->service_last = 
now; - -- /* -- * first round of queueing, only select from queues that -- * don't already have io in-flight -- */ -- if (first_round && cfqq->in_flight) -- continue; -+ cfq_clear_cfqq_must_dispatch(cfqq); -+ cfq_clear_cfqq_wait_request(cfqq); - -- cfq_dispatch_request(q, cfqd, cfqq); -+ /* -+ * store what was left of this slice, if the queue idled out -+ * or was preempted -+ */ -+ if (time_after(cfqq->slice_end, now)) -+ cfqq->slice_left = cfqq->slice_end - now; -+ else -+ cfqq->slice_left = 0; - -- if (!RB_EMPTY(&cfqq->sort_list)) -- busy_queues++; -+ if (cfq_cfqq_on_rr(cfqq)) -+ cfq_resort_rr_list(cfqq, preempted); - -- queued++; -- } -+ if (cfqq == cfqd->active_queue) -+ cfqd->active_queue = NULL; - -- if ((queued < max_dispatch) && (busy_queues || first_round)) { -- first_round = 0; -- goto restart; -+ if (cfqd->active_cic) { -+ put_io_context(cfqd->active_cic->ioc); -+ cfqd->active_cic = NULL; - } - -- return queued; -+ cfqd->dispatch_slice = 0; - } - --static inline void cfq_account_dispatch(struct cfq_rq *crq) -+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) - { -- struct cfq_queue *cfqq = crq->cfq_queue; -- struct cfq_data *cfqd = cfqq->cfqd; -- unsigned long now, elapsed; -+ struct cfq_queue *cfqq = cfqd->active_queue; - -- if (!blk_fs_request(crq->request)) -- return; -+ if (cfqq) { -+ /* -+ * use deferred expiry, if there are requests in progress as -+ * not to disturb the slice of the next queue -+ */ -+ if (cfq_cfqq_dispatched(cfqq)) -+ cfq_mark_cfqq_expired(cfqq); -+ else -+ __cfq_slice_expired(cfqd, cfqq, preempted); -+ } -+} - -- /* -- * accounted bit is necessary since some drivers will call -- * elv_next_request() many times for the same request (eg ide) -- */ -- if (crq->accounted) -- return; -+static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) - -- now = jiffies; -- if (cfqq->service_start == ~0UL) -- cfqq->service_start = now; -+{ -+ WARN_ON(!RB_EMPTY(&cfqq->sort_list)); -+ WARN_ON(cfqq 
!= cfqd->active_queue); - - /* -- * on drives with tagged command queueing, command turn-around time -- * doesn't necessarily reflect the time spent processing this very -- * command inside the drive. so do the accounting differently there, -- * by just sorting on the number of requests -- */ -- if (cfqd->cfq_tagged) { -- if (time_after(now, cfqq->service_start + cfq_service)) { -- cfqq->service_start = now; -- cfqq->service_used /= 10; -- } -- -- cfqq->service_used++; -- cfq_sort_rr_list(cfqq, 0); -- } -+ * idle is disabled, either manually or by past process history -+ */ -+ if (!cfqd->cfq_slice_idle) -+ return 0; -+ if (!cfq_cfqq_idle_window(cfqq)) -+ return 0; -+ /* -+ * task has exited, don't wait -+ */ -+ if (cfqd->active_cic && !cfqd->active_cic->ioc->task) -+ return 0; -+ -+ cfq_mark_cfqq_must_dispatch(cfqq); -+ cfq_mark_cfqq_wait_request(cfqq); -+ -+ if (!timer_pending(&cfqd->idle_slice_timer)) { -+ unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); -+ -+ cfqd->idle_slice_timer.expires = jiffies + slice_left; -+ add_timer(&cfqd->idle_slice_timer); -+ } -+ -+ return 1; -+} -+ -+/* -+ * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues, -+ * this function sector sorts the selected request to minimize seeks. we start -+ * at cfqd->last_sector, not 0. 
-+ */ -+static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) -+{ -+ struct cfq_data *cfqd = q->elevator->elevator_data; -+ struct cfq_queue *cfqq = crq->cfq_queue; -+ struct list_head *head = &q->queue_head, *entry = head; -+ struct request *__rq; -+ sector_t last; -+ -+ list_del(&crq->request->queuelist); -+ -+ last = cfqd->last_sector; -+ list_for_each_entry_reverse(__rq, head, queuelist) { -+ struct cfq_rq *__crq = RQ_DATA(__rq); -+ -+ if (blk_barrier_rq(__rq)) -+ break; -+ if (!blk_fs_request(__rq)) -+ break; -+ if (cfq_crq_requeued(__crq)) -+ break; -+ -+ if (__rq->sector <= crq->request->sector) -+ break; -+ if (__rq->sector > last && crq->request->sector < last) { -+ last = crq->request->sector + crq->request->nr_sectors; -+ break; -+ } -+ entry = &__rq->queuelist; -+ } -+ -+ cfqd->last_sector = last; -+ -+ cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); - -- elapsed = now - crq->queue_start; -- if (elapsed > max_elapsed_dispatch) -- max_elapsed_dispatch = elapsed; -+ cfq_del_crq_rb(crq); -+ cfq_remove_merge_hints(q, crq); -+ -+ cfq_mark_crq_in_flight(crq); -+ cfq_clear_crq_requeued(crq); -+ -+ cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; -+ list_add_tail(&crq->request->queuelist, entry); -+} -+ -+/* -+ * return expired entry, or NULL to just start from scratch in rbtree -+ */ -+static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) -+{ -+ struct cfq_data *cfqd = cfqq->cfqd; -+ struct request *rq; -+ struct cfq_rq *crq; - -- crq->accounted = 1; -- crq->service_start = now; -+ if (cfq_cfqq_fifo_expire(cfqq)) -+ return NULL; -+ -+ if (!list_empty(&cfqq->fifo)) { -+ int fifo = cfq_cfqq_class_sync(cfqq); - -- if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) { -- cfqq->cfqd->cfq_tagged = 1; -- printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG); -+ crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next)); -+ rq = crq->request; -+ if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) { -+ 
cfq_mark_cfqq_fifo_expire(cfqq); -+ return crq; -+ } - } -+ -+ return NULL; -+} -+ -+/* -+ * Scale schedule slice based on io priority. Use the sync time slice only -+ * if a queue is marked sync and has sync io queued. A sync queue with async -+ * io only, should not get full sync slice length. -+ */ -+static inline int -+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+{ -+ const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; -+ -+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); -+ -+ return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); -+} -+ -+static inline void -+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+{ -+ cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; -+} -+ -+static inline int -+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+{ -+ const int base_rq = cfqd->cfq_slice_async_rq; -+ -+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); -+ -+ return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); -+} -+ -+/* -+ * get next queue for service -+ */ -+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force) -+{ -+ unsigned long now = jiffies; -+ struct cfq_queue *cfqq; -+ -+ cfqq = cfqd->active_queue; -+ if (!cfqq) -+ goto new_queue; -+ -+ if (cfq_cfqq_expired(cfqq)) -+ goto new_queue; -+ -+ /* -+ * slice has expired -+ */ -+ if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end)) -+ goto expire; -+ -+ /* -+ * if queue has requests, dispatch one. 
if not, check if -+ * enough slice is left to wait for one -+ */ -+ if (!RB_EMPTY(&cfqq->sort_list)) -+ goto keep_queue; -+ else if (!force && cfq_cfqq_class_sync(cfqq) && -+ time_before(now, cfqq->slice_end)) { -+ if (cfq_arm_slice_timer(cfqd, cfqq)) -+ return NULL; -+ } -+ -+expire: -+ cfq_slice_expired(cfqd, 0); -+new_queue: -+ cfqq = cfq_set_active_queue(cfqd); -+keep_queue: -+ return cfqq; -+} -+ -+static int -+__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, -+ int max_dispatch) -+{ -+ int dispatched = 0; -+ -+ BUG_ON(RB_EMPTY(&cfqq->sort_list)); -+ -+ do { -+ struct cfq_rq *crq; -+ -+ /* -+ * follow expired path, else get first next available -+ */ -+ if ((crq = cfq_check_fifo(cfqq)) == NULL) -+ crq = cfqq->next_crq; -+ -+ /* -+ * finally, insert request into driver dispatch list -+ */ -+ cfq_dispatch_sort(cfqd->queue, crq); -+ -+ cfqd->dispatch_slice++; -+ dispatched++; -+ -+ if (!cfqd->active_cic) { -+ atomic_inc(&crq->io_context->ioc->refcount); -+ cfqd->active_cic = crq->io_context; -+ } -+ -+ if (RB_EMPTY(&cfqq->sort_list)) -+ break; -+ -+ } while (dispatched < max_dispatch); -+ -+ /* -+ * if slice end isn't set yet, set it. if at least one request was -+ * sync, use the sync time slice value -+ */ -+ if (!cfqq->slice_end) -+ cfq_set_prio_slice(cfqd, cfqq); -+ -+ /* -+ * expire an async queue immediately if it has used up its slice. idle -+ * queue always expire after 1 dispatch round. 
-+ */ -+ if ((!cfq_cfqq_sync(cfqq) && -+ cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || -+ cfq_class_idle(cfqq)) -+ cfq_slice_expired(cfqd, 0); -+ -+ return dispatched; -+} -+ -+static int -+cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) -+{ -+ struct cfq_data *cfqd = q->elevator->elevator_data; -+ struct cfq_queue *cfqq; -+ -+ if (!cfqd->busy_queues) -+ return 0; -+ -+ cfqq = cfq_select_queue(cfqd, force); -+ if (cfqq) { -+ cfq_clear_cfqq_must_dispatch(cfqq); -+ cfq_clear_cfqq_wait_request(cfqq); -+ del_timer(&cfqd->idle_slice_timer); -+ -+ if (cfq_class_idle(cfqq)) -+ max_dispatch = 1; -+ -+ return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); -+ } -+ -+ return 0; -+} -+ -+static inline void cfq_account_dispatch(struct cfq_rq *crq) -+{ -+ struct cfq_queue *cfqq = crq->cfq_queue; -+ struct cfq_data *cfqd = cfqq->cfqd; -+ -+ if (unlikely(!blk_fs_request(crq->request))) -+ return; -+ -+ /* -+ * accounted bit is necessary since some drivers will call -+ * elv_next_request() many times for the same request (eg ide) -+ */ -+ if (cfq_crq_in_driver(crq)) -+ return; -+ -+ cfq_mark_crq_in_driver(crq); -+ cfqd->rq_in_driver++; - } - - static inline void - cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) - { - struct cfq_data *cfqd = cfqq->cfqd; -+ unsigned long now; - -- if (!crq->accounted) -+ if (!cfq_crq_in_driver(crq)) - return; - -+ now = jiffies; -+ - WARN_ON(!cfqd->rq_in_driver); - cfqd->rq_in_driver--; - -- if (!cfqd->cfq_tagged) { -- unsigned long now = jiffies; -- unsigned long duration = now - crq->service_start; -+ if (!cfq_class_idle(cfqq)) -+ cfqd->last_end_request = now; - -- if (time_after(now, cfqq->service_start + cfq_service)) { -- cfqq->service_start = now; -- cfqq->service_used >>= 3; -+ if (!cfq_cfqq_dispatched(cfqq)) { -+ if (cfq_cfqq_on_rr(cfqq)) { -+ cfqq->service_last = now; -+ cfq_resort_rr_list(cfqq, 0); -+ } -+ if (cfq_cfqq_expired(cfqq)) { -+ __cfq_slice_expired(cfqd, cfqq, 0); -+ 
cfq_schedule_dispatch(cfqd); - } -- -- cfqq->service_used += duration; -- cfq_sort_rr_list(cfqq, 0); -- -- if (duration > max_elapsed_crq) -- max_elapsed_crq = duration; - } -+ -+ if (cfq_crq_is_sync(crq)) -+ crq->io_context->last_end_request = now; - } - - static struct request *cfq_next_request(request_queue_t *q) -@@ -950,7 +1278,19 @@ - dispatch: - rq = list_entry_rq(q->queue_head.next); - -- if ((crq = RQ_DATA(rq)) != NULL) { -+ crq = RQ_DATA(rq); -+ if (crq) { -+ struct cfq_queue *cfqq = crq->cfq_queue; -+ -+ /* -+ * if idle window is disabled, allow queue buildup -+ */ -+ if (!cfq_crq_in_driver(crq) && -+ !cfq_cfqq_idle_window(cfqq) && -+ !blk_barrier_rq(rq) && -+ cfqd->rq_in_driver >= cfqd->cfq_max_depth) -+ return NULL; -+ - cfq_remove_merge_hints(q, crq); - cfq_account_dispatch(crq); - } -@@ -958,7 +1298,7 @@ - return rq; - } - -- if (cfq_dispatch_requests(q, cfqd->cfq_quantum)) -+ if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0)) - goto dispatch; - - return NULL; -@@ -972,13 +1312,21 @@ - */ - static void cfq_put_queue(struct cfq_queue *cfqq) - { -- BUG_ON(!atomic_read(&cfqq->ref)); -+ struct cfq_data *cfqd = cfqq->cfqd; -+ -+ BUG_ON(atomic_read(&cfqq->ref) <= 0); - - if (!atomic_dec_and_test(&cfqq->ref)) - return; - - BUG_ON(rb_first(&cfqq->sort_list)); -- BUG_ON(cfqq->on_rr); -+ BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); -+ BUG_ON(cfq_cfqq_on_rr(cfqq)); -+ -+ if (unlikely(cfqd->active_queue == cfqq)) { -+ __cfq_slice_expired(cfqd, cfqq, 0); -+ cfq_schedule_dispatch(cfqd); -+ } - - cfq_put_cfqd(cfqq->cfqd); - -@@ -991,15 +1339,17 @@ - } - - static inline struct cfq_queue * --__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) -+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, -+ const int hashval) - { - struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; - struct hlist_node *entry, *next; - - hlist_for_each_safe(entry, next, hash_list) { - struct cfq_queue *__cfqq = 
list_entry_qhash(entry); -+ const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); - -- if (__cfqq->key == key) -+ if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) - return __cfqq; - } - -@@ -1007,94 +1357,220 @@ - } - - static struct cfq_queue * --cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key) -+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) - { -- return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT)); -+ return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); - } - --static inline void --cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq, -- struct cfq_io_context *cic) -+static void cfq_free_io_context(struct cfq_io_context *cic) - { -- unsigned long hashkey = cfq_hash_key(cfqd, current); -- unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT); -- struct cfq_queue *__cfqq; -- unsigned long flags; -- -- spin_lock_irqsave(cfqd->queue->queue_lock, flags); -+ struct cfq_io_context *__cic; -+ struct list_head *entry, *next; - -- hlist_del(&(*cfqq)->cfq_hash); -- -- __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval); -- if (!__cfqq || __cfqq == *cfqq) { -- __cfqq = *cfqq; -- hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); -- __cfqq->key_type = cfqd->key_type; -- } else { -- atomic_inc(&__cfqq->ref); -- cic->cfqq = __cfqq; -- cfq_put_queue(*cfqq); -- *cfqq = __cfqq; -+ list_for_each_safe(entry, next, &cic->list) { -+ __cic = list_entry(entry, struct cfq_io_context, list); -+ kmem_cache_free(cfq_ioc_pool, __cic); - } - -- cic->cfqq = __cfqq; -- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); -+ kmem_cache_free(cfq_ioc_pool, cic); - } - --static void cfq_free_io_context(struct cfq_io_context *cic) -+/* -+ * Called with interrupts disabled -+ */ -+static void cfq_exit_single_io_context(struct cfq_io_context *cic) - { -- kmem_cache_free(cfq_ioc_pool, cic); -+ struct cfq_data *cfqd = cic->cfqq->cfqd; -+ 
request_queue_t *q = cfqd->queue; -+ -+ WARN_ON(!irqs_disabled()); -+ -+ spin_lock(q->queue_lock); -+ -+ if (unlikely(cic->cfqq == cfqd->active_queue)) { -+ __cfq_slice_expired(cfqd, cic->cfqq, 0); -+ cfq_schedule_dispatch(cfqd); -+ } -+ -+ cfq_put_queue(cic->cfqq); -+ cic->cfqq = NULL; -+ spin_unlock(q->queue_lock); - } - - /* -- * locking hierarchy is: io_context lock -> queue locks -+ * Another task may update the task cic list, if it is doing a queue lookup -+ * on its behalf. cfq_cic_lock excludes such concurrent updates - */ - static void cfq_exit_io_context(struct cfq_io_context *cic) - { -- struct cfq_queue *cfqq = cic->cfqq; -- struct list_head *entry = &cic->list; -- request_queue_t *q; -+ struct cfq_io_context *__cic; -+ struct list_head *entry; - unsigned long flags; - -+ local_irq_save(flags); -+ - /* - * put the reference this task is holding to the various queues - */ -- spin_lock_irqsave(&cic->ioc->lock, flags); -- while ((entry = cic->list.next) != &cic->list) { -- struct cfq_io_context *__cic; -- -+ list_for_each(entry, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); -- list_del(entry); -- -- q = __cic->cfqq->cfqd->queue; -- spin_lock(q->queue_lock); -- cfq_put_queue(__cic->cfqq); -- spin_unlock(q->queue_lock); -+ cfq_exit_single_io_context(__cic); - } - -- q = cfqq->cfqd->queue; -- spin_lock(q->queue_lock); -- cfq_put_queue(cfqq); -- spin_unlock(q->queue_lock); -- -- cic->cfqq = NULL; -- spin_unlock_irqrestore(&cic->ioc->lock, flags); -+ cfq_exit_single_io_context(cic); -+ local_irq_restore(flags); - } - --static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) -+static struct cfq_io_context * -+cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) - { -- struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags); -+ struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); - - if (cic) { -- cic->dtor = cfq_free_io_context; -- cic->exit = cfq_exit_io_context; - 
INIT_LIST_HEAD(&cic->list); - cic->cfqq = NULL; -+ cic->key = NULL; -+ cic->last_end_request = jiffies; -+ cic->ttime_total = 0; -+ cic->ttime_samples = 0; -+ cic->ttime_mean = 0; -+ cic->dtor = cfq_free_io_context; -+ cic->exit = cfq_exit_io_context; - } - - return cic; - } - -+static void cfq_init_prio_data(struct cfq_queue *cfqq) -+{ -+ struct task_struct *tsk = current; -+ int ioprio_class; -+ -+ if (!cfq_cfqq_prio_changed(cfqq)) -+ return; -+ -+ ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); -+ switch (ioprio_class) { -+ default: -+ printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); -+ case IOPRIO_CLASS_NONE: -+ /* -+ * no prio set, place us in the middle of the BE classes -+ */ -+ cfqq->ioprio = task_nice_ioprio(tsk); -+ cfqq->ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_RT: -+ cfqq->ioprio = task_ioprio(tsk); -+ cfqq->ioprio_class = IOPRIO_CLASS_RT; -+ break; -+ case IOPRIO_CLASS_BE: -+ cfqq->ioprio = task_ioprio(tsk); -+ cfqq->ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_IDLE: -+ cfqq->ioprio_class = IOPRIO_CLASS_IDLE; -+ cfqq->ioprio = 7; -+ cfq_clear_cfqq_idle_window(cfqq); -+ break; -+ } -+ -+ /* -+ * keep track of original prio settings in case we have to temporarily -+ * elevate the priority of this queue -+ */ -+ cfqq->org_ioprio = cfqq->ioprio; -+ cfqq->org_ioprio_class = cfqq->ioprio_class; -+ -+ if (cfq_cfqq_on_rr(cfqq)) -+ cfq_resort_rr_list(cfqq, 0); -+ -+ cfq_clear_cfqq_prio_changed(cfqq); -+} -+ -+static inline void changed_ioprio(struct cfq_queue *cfqq) -+{ -+ if (cfqq) { -+ struct cfq_data *cfqd = cfqq->cfqd; -+ -+ spin_lock(cfqd->queue->queue_lock); -+ cfq_mark_cfqq_prio_changed(cfqq); -+ cfq_init_prio_data(cfqq); -+ spin_unlock(cfqd->queue->queue_lock); -+ } -+} -+ -+/* -+ * callback from sys_ioprio_set, irqs are disabled -+ */ -+static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) -+{ -+ struct cfq_io_context *cic = ioc->cic; -+ -+ changed_ioprio(cic->cfqq); -+ -+ 
list_for_each_entry(cic, &cic->list, list) -+ changed_ioprio(cic->cfqq); -+ -+ return 0; -+} -+ -+static struct cfq_queue * -+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, -+ int gfp_mask) -+{ -+ const int hashval = hash_long(key, CFQ_QHASH_SHIFT); -+ struct cfq_queue *cfqq, *new_cfqq = NULL; -+ -+retry: -+ cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); -+ -+ if (!cfqq) { -+ if (new_cfqq) { -+ cfqq = new_cfqq; -+ new_cfqq = NULL; -+ } else { -+ spin_unlock_irq(cfqd->queue->queue_lock); -+ new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); -+ spin_lock_irq(cfqd->queue->queue_lock); -+ -+ if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) -+ goto out; -+ -+ goto retry; -+ } -+ -+ memset(cfqq, 0, sizeof(*cfqq)); -+ -+ INIT_HLIST_NODE(&cfqq->cfq_hash); -+ INIT_LIST_HEAD(&cfqq->cfq_list); -+ RB_CLEAR_ROOT(&cfqq->sort_list); -+ INIT_LIST_HEAD(&cfqq->fifo); -+ -+ cfqq->key = key; -+ hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); -+ atomic_set(&cfqq->ref, 0); -+ cfqq->cfqd = cfqd; -+ atomic_inc(&cfqd->ref); -+ cfqq->service_last = 0; -+ /* -+ * set ->slice_left to allow preemption for a new process -+ */ -+ cfqq->slice_left = 2 * cfqd->cfq_slice_idle; -+ cfq_mark_cfqq_idle_window(cfqq); -+ cfq_mark_cfqq_prio_changed(cfqq); -+ cfq_init_prio_data(cfqq); -+ } -+ -+ if (new_cfqq) -+ kmem_cache_free(cfq_pool, new_cfqq); -+ -+ atomic_inc(&cfqq->ref); -+out: -+ WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); -+ return cfqq; -+} -+ - /* - * Setup general io context and cfq io context. 
There can be several cfq - * io contexts per general io context, if this process is doing io to more -@@ -1102,39 +1578,39 @@ - * cfqq, so we don't need to worry about it disappearing - */ - static struct cfq_io_context * --cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) -+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) - { -- struct cfq_data *cfqd = (*cfqq)->cfqd; -- struct cfq_queue *__cfqq = *cfqq; -+ struct io_context *ioc = NULL; - struct cfq_io_context *cic; -- struct io_context *ioc; - -- might_sleep_if(gfp_flags & __GFP_WAIT); -+ might_sleep_if(gfp_mask & __GFP_WAIT); - -- ioc = get_io_context(gfp_flags); -+ ioc = get_io_context(gfp_mask); - if (!ioc) - return NULL; - - if ((cic = ioc->cic) == NULL) { -- cic = cfq_alloc_io_context(gfp_flags); -+ cic = cfq_alloc_io_context(cfqd, gfp_mask); - - if (cic == NULL) - goto err; - -+ /* -+ * manually increment generic io_context usage count, it -+ * cannot go away since we are already holding one ref to it -+ */ - ioc->cic = cic; -+ ioc->set_ioprio = cfq_ioc_set_ioprio; - cic->ioc = ioc; -- cic->cfqq = __cfqq; -- atomic_inc(&__cfqq->ref); -+ cic->key = cfqd; -+ atomic_inc(&cfqd->ref); - } else { - struct cfq_io_context *__cic; -- unsigned long flags; - - /* -- * since the first cic on the list is actually the head -- * itself, need to check this here or we'll duplicate an -- * cic per ioc for no reason -+ * the first cic on the list is actually the head itself - */ -- if (cic->cfqq == __cfqq) -+ if (cic->key == cfqd) - goto out; - - /* -@@ -1142,152 +1618,255 @@ - * should be ok here, the list will usually not be more than - * 1 or a few entries long - */ -- spin_lock_irqsave(&ioc->lock, flags); - list_for_each_entry(__cic, &cic->list, list) { - /* - * this process is already holding a reference to - * this queue, so no need to get one more - */ -- if (__cic->cfqq == __cfqq) { -+ if (__cic->key == cfqd) { - cic = __cic; -- spin_unlock_irqrestore(&ioc->lock, flags); - goto out; - } - } 
-- spin_unlock_irqrestore(&ioc->lock, flags); - - /* - * nope, process doesn't have a cic assoicated with this - * cfqq yet. get a new one and add to list - */ -- __cic = cfq_alloc_io_context(gfp_flags); -+ __cic = cfq_alloc_io_context(cfqd, gfp_mask); - if (__cic == NULL) - goto err; - - __cic->ioc = ioc; -- __cic->cfqq = __cfqq; -- atomic_inc(&__cfqq->ref); -- spin_lock_irqsave(&ioc->lock, flags); -+ __cic->key = cfqd; -+ atomic_inc(&cfqd->ref); - list_add(&__cic->list, &cic->list); -- spin_unlock_irqrestore(&ioc->lock, flags); -- - cic = __cic; -- *cfqq = __cfqq; - } - - out: -+ return cic; -+err: -+ put_io_context(ioc); -+ return NULL; -+} -+ -+static void -+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) -+{ -+ unsigned long elapsed, ttime; -+ -+ /* -+ * if this context already has stuff queued, thinktime is from -+ * last queue not last end -+ */ -+#if 0 -+ if (time_after(cic->last_end_request, cic->last_queue)) -+ elapsed = jiffies - cic->last_end_request; -+ else -+ elapsed = jiffies - cic->last_queue; -+#else -+ elapsed = jiffies - cic->last_end_request; -+#endif -+ -+ ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); -+ -+ cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; -+ cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; -+ cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; -+} -+ -+#define sample_valid(samples) ((samples) > 80) -+ -+/* -+ * Disable idle window if the process thinks too long or seeks so much that -+ * it doesn't matter -+ */ -+static void -+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, -+ struct cfq_io_context *cic) -+{ -+ int enable_idle = cfq_cfqq_idle_window(cfqq); -+ -+ if (!cic->ioc->task || !cfqd->cfq_slice_idle) -+ enable_idle = 0; -+ else if (sample_valid(cic->ttime_samples)) { -+ if (cic->ttime_mean > cfqd->cfq_slice_idle) -+ enable_idle = 0; -+ else -+ enable_idle = 1; -+ } -+ -+ if (enable_idle) -+ cfq_mark_cfqq_idle_window(cfqq); -+ else -+ 
cfq_clear_cfqq_idle_window(cfqq); -+} -+ -+ -+/* -+ * Check if new_cfqq should preempt the currently active queue. Return 0 for -+ * no or if we aren't sure, a 1 will cause a preempt. -+ */ -+static int -+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, -+ struct cfq_rq *crq) -+{ -+ struct cfq_queue *cfqq = cfqd->active_queue; -+ -+ if (cfq_class_idle(new_cfqq)) -+ return 0; -+ -+ if (!cfqq) -+ return 1; -+ -+ if (cfq_class_idle(cfqq)) -+ return 1; -+ if (!cfq_cfqq_wait_request(new_cfqq)) -+ return 0; - /* -- * if key_type has been changed on the fly, we lazily rehash -- * each queue at lookup time -+ * if it doesn't have slice left, forget it - */ -- if ((*cfqq)->key_type != cfqd->key_type) -- cfq_rehash_cfqq(cfqd, cfqq, cic); -+ if (new_cfqq->slice_left < cfqd->cfq_slice_idle) -+ return 0; -+ if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq)) -+ return 1; -+ -+ return 0; -+} -+ -+/* -+ * cfqq preempts the active queue. if we allowed preempt with no slice left, -+ * let it have half of its nominal slice. 
-+ */ -+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) -+{ -+ struct cfq_queue *__cfqq, *next; -+ -+ list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list) -+ cfq_resort_rr_list(__cfqq, 1); - -- return cic; --err: -- put_io_context(ioc); -- return NULL; -+ if (!cfqq->slice_left) -+ cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; -+ -+ cfqq->slice_end = cfqq->slice_left + jiffies; -+ __cfq_slice_expired(cfqd, cfqq, 1); -+ __cfq_set_active_queue(cfqd, cfqq); - } - --static struct cfq_queue * --__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask) -+/* -+ * should really be a ll_rw_blk.c helper -+ */ -+static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq) - { -- const int hashval = hash_long(key, CFQ_QHASH_SHIFT); -- struct cfq_queue *cfqq, *new_cfqq = NULL; -- --retry: -- cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); -+ request_queue_t *q = cfqd->queue; - -- if (!cfqq) { -- if (new_cfqq) { -- cfqq = new_cfqq; -- new_cfqq = NULL; -- } else { -- spin_unlock_irq(cfqd->queue->queue_lock); -- new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); -- spin_lock_irq(cfqd->queue->queue_lock); -+ if (!blk_queue_plugged(q)) -+ q->request_fn(q); -+ else -+ __generic_unplug_device(q); -+} - -- if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) -- goto out; -+/* -+ * Called when a new fs request (crq) is added (to cfqq). Check if there's -+ * something we should do about it -+ */ -+static void -+cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, -+ struct cfq_rq *crq) -+{ -+ struct cfq_io_context *cic; - -- goto retry; -- } -+ cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); - -- memset(cfqq, 0, sizeof(*cfqq)); -+ /* -+ * we never wait for an async request and we don't allow preemption -+ * of an async request. 
so just return early -+ */ -+ if (!cfq_crq_is_sync(crq)) -+ return; - -- INIT_HLIST_NODE(&cfqq->cfq_hash); -- INIT_LIST_HEAD(&cfqq->cfq_list); -- RB_CLEAR_ROOT(&cfqq->sort_list); -- INIT_LIST_HEAD(&cfqq->fifo[0]); -- INIT_LIST_HEAD(&cfqq->fifo[1]); -+ cic = crq->io_context; - -- cfqq->key = key; -- hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); -- atomic_set(&cfqq->ref, 0); -- cfqq->cfqd = cfqd; -- atomic_inc(&cfqd->ref); -- cfqq->key_type = cfqd->key_type; -- cfqq->service_start = ~0UL; -- } -+ cfq_update_io_thinktime(cfqd, cic); -+ cfq_update_idle_window(cfqd, cfqq, cic); - -- if (new_cfqq) -- kmem_cache_free(cfq_pool, new_cfqq); -+ cic->last_queue = jiffies; - -- atomic_inc(&cfqq->ref); --out: -- WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); -- return cfqq; -+ if (cfqq == cfqd->active_queue) { -+ /* -+ * if we are waiting for a request for this queue, let it rip -+ * immediately and flag that we must not expire this queue -+ * just now -+ */ -+ if (cfq_cfqq_wait_request(cfqq)) { -+ cfq_mark_cfqq_must_dispatch(cfqq); -+ del_timer(&cfqd->idle_slice_timer); -+ cfq_start_queueing(cfqd, cfqq); -+ } -+ } else if (cfq_should_preempt(cfqd, cfqq, crq)) { -+ /* -+ * not the active queue - expire current slice if it is -+ * idle and has expired it's mean thinktime or this new queue -+ * has some old slice time left and is of higher priority -+ */ -+ cfq_preempt_queue(cfqd, cfqq); -+ cfq_mark_cfqq_must_dispatch(cfqq); -+ cfq_start_queueing(cfqd, cfqq); -+ } - } - --static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq) -+static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) - { -- crq->is_sync = 0; -- if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE) -- crq->is_sync = 1; -+ struct cfq_rq *crq = RQ_DATA(rq); -+ struct cfq_queue *cfqq = crq->cfq_queue; -+ -+ cfq_init_prio_data(cfqq); - - cfq_add_crq_rb(crq); -- crq->queue_start = jiffies; - -- list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]); 
-+ list_add_tail(&rq->queuelist, &cfqq->fifo); -+ -+ if (rq_mergeable(rq)) { -+ cfq_add_crq_hash(cfqd, crq); -+ -+ if (!cfqd->queue->last_merge) -+ cfqd->queue->last_merge = rq; -+ } -+ -+ cfq_crq_enqueued(cfqd, cfqq, crq); - } - - static void - cfq_insert_request(request_queue_t *q, struct request *rq, int where) - { - struct cfq_data *cfqd = q->elevator->elevator_data; -- struct cfq_rq *crq = RQ_DATA(rq); - - switch (where) { - case ELEVATOR_INSERT_BACK: -- while (cfq_dispatch_requests(q, cfqd->cfq_quantum)) -+ while (cfq_dispatch_requests(q, INT_MAX, 1)) - ; - list_add_tail(&rq->queuelist, &q->queue_head); -+ /* -+ * If we were idling with pending requests on -+ * inactive cfqqs, force dispatching will -+ * remove the idle timer and the queue won't -+ * be kicked by __make_request() afterward. -+ * Kick it here. -+ */ -+ cfq_schedule_dispatch(cfqd); - break; - case ELEVATOR_INSERT_FRONT: - list_add(&rq->queuelist, &q->queue_head); - break; - case ELEVATOR_INSERT_SORT: - BUG_ON(!blk_fs_request(rq)); -- cfq_enqueue(cfqd, crq); -+ cfq_enqueue(cfqd, rq); - break; - default: - printk("%s: bad insert point %d\n", __FUNCTION__,where); - return; - } -- -- if (rq_mergeable(rq)) { -- cfq_add_crq_hash(cfqd, crq); -- -- if (!q->last_merge) -- q->last_merge = rq; -- } --} -- --static int cfq_queue_empty(request_queue_t *q) --{ -- struct cfq_data *cfqd = q->elevator->elevator_data; -- -- return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list); - } - - static void cfq_completed_request(request_queue_t *q, struct request *rq) -@@ -1300,9 +1879,11 @@ - - cfqq = crq->cfq_queue; - -- if (crq->in_flight) { -- WARN_ON(!cfqq->in_flight); -- cfqq->in_flight--; -+ if (cfq_crq_in_flight(crq)) { -+ const int sync = cfq_crq_is_sync(crq); -+ -+ WARN_ON(!cfqq->on_dispatch[sync]); -+ cfqq->on_dispatch[sync]--; - } - - cfq_account_completion(cfqq, crq); -@@ -1332,51 +1913,136 @@ - return NULL; - } - --static int cfq_may_queue(request_queue_t *q, int rw) -+/* -+ * we temporarily boost 
lower priority queues if they are holding fs exclusive -+ * resources. they are boosted to normal prio (CLASS_BE/4) -+ */ -+static void cfq_prio_boost(struct cfq_queue *cfqq) - { -- struct cfq_data *cfqd = q->elevator->elevator_data; -- struct cfq_queue *cfqq; -- int ret = ELV_MQUEUE_MAY; -+ const int ioprio_class = cfqq->ioprio_class; -+ const int ioprio = cfqq->ioprio; - -- if (current->flags & PF_MEMALLOC) -- return ELV_MQUEUE_MAY; -+ if (has_fs_excl()) { -+ /* -+ * boost idle prio on transactions that would lock out other -+ * users of the filesystem -+ */ -+ if (cfq_class_idle(cfqq)) -+ cfqq->ioprio_class = IOPRIO_CLASS_BE; -+ if (cfqq->ioprio > IOPRIO_NORM) -+ cfqq->ioprio = IOPRIO_NORM; -+ } else { -+ /* -+ * check if we need to unboost the queue -+ */ -+ if (cfqq->ioprio_class != cfqq->org_ioprio_class) -+ cfqq->ioprio_class = cfqq->org_ioprio_class; -+ if (cfqq->ioprio != cfqq->org_ioprio) -+ cfqq->ioprio = cfqq->org_ioprio; -+ } - -- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current)); -- if (cfqq) { -- int limit = cfqd->max_queued; -+ /* -+ * refile between round-robin lists if we moved the priority class -+ */ -+ if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) && -+ cfq_cfqq_on_rr(cfqq)) -+ cfq_resort_rr_list(cfqq, 0); -+} - -- if (cfqq->allocated[rw] < cfqd->cfq_queued) -- return ELV_MQUEUE_MUST; -+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) -+{ -+ if (rw == READ || process_sync(task)) -+ return task->pid; -+ -+ return CFQ_KEY_ASYNC; -+} - -- if (cfqd->busy_queues) -- limit = q->nr_requests / cfqd->busy_queues; -+static inline int -+__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, -+ struct task_struct *task, int rw) -+{ -+#if 1 -+ if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && -+ !cfq_cfqq_must_alloc_slice(cfqq)) { -+ cfq_mark_cfqq_must_alloc_slice(cfqq); -+ return ELV_MQUEUE_MUST; -+ } - -- if (limit < cfqd->cfq_queued) -- limit = cfqd->cfq_queued; -- else if (limit > 
cfqd->max_queued) -- limit = cfqd->max_queued; -- -- if (cfqq->allocated[rw] >= limit) { -- if (limit > cfqq->alloc_limit[rw]) -- cfqq->alloc_limit[rw] = limit; -+ return ELV_MQUEUE_MAY; -+#else -+ if (!cfqq || task->flags & PF_MEMALLOC) -+ return ELV_MQUEUE_MAY; -+ if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) { -+ if (cfq_cfqq_wait_request(cfqq)) -+ return ELV_MQUEUE_MUST; - -- ret = ELV_MQUEUE_NO; -+ /* -+ * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we -+ * can quickly flood the queue with writes from a single task -+ */ -+ if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) { -+ cfq_mark_cfqq_must_alloc_slice(cfqq); -+ return ELV_MQUEUE_MUST; - } -+ -+ return ELV_MQUEUE_MAY; - } -+ if (cfq_class_idle(cfqq)) -+ return ELV_MQUEUE_NO; -+ if (cfqq->allocated[rw] >= cfqd->max_queued) { -+ struct io_context *ioc = get_io_context(GFP_ATOMIC); -+ int ret = ELV_MQUEUE_NO; - -- return ret; -+ if (ioc && ioc->nr_batch_requests) -+ ret = ELV_MQUEUE_MAY; -+ -+ put_io_context(ioc); -+ return ret; -+ } -+ -+ return ELV_MQUEUE_MAY; -+#endif -+} -+ -+static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) -+{ -+ struct cfq_data *cfqd = q->elevator->elevator_data; -+ struct task_struct *tsk = current; -+ struct cfq_queue *cfqq; -+ -+ /* -+ * don't force setup of a queue from here, as a call to may_queue -+ * does not necessarily imply that a request actually will be queued. 
-+ * so just lookup a possibly existing queue, or return 'may queue' -+ * if that fails -+ */ -+ cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio); -+ if (cfqq) { -+ cfq_init_prio_data(cfqq); -+ cfq_prio_boost(cfqq); -+ -+ return __cfq_may_queue(cfqd, cfqq, tsk, rw); -+ } -+ -+ return ELV_MQUEUE_MAY; - } - - static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) - { -+ struct cfq_data *cfqd = q->elevator->elevator_data; - struct request_list *rl = &q->rq; -- const int write = waitqueue_active(&rl->wait[WRITE]); -- const int read = waitqueue_active(&rl->wait[READ]); - -- if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ]) -- wake_up(&rl->wait[READ]); -- if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE]) -- wake_up(&rl->wait[WRITE]); -+ if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { -+ smp_mb(); -+ if (waitqueue_active(&rl->wait[READ])) -+ wake_up(&rl->wait[READ]); -+ } -+ -+ if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { -+ smp_mb(); -+ if (waitqueue_active(&rl->wait[WRITE])) -+ wake_up(&rl->wait[WRITE]); -+ } - } - - /* -@@ -1389,69 +2055,61 @@ - - if (crq) { - struct cfq_queue *cfqq = crq->cfq_queue; -+ const int rw = rq_data_dir(rq); - -- BUG_ON(q->last_merge == rq); -- BUG_ON(!hlist_unhashed(&crq->hash)); -- -- if (crq->io_context) -- put_io_context(crq->io_context->ioc); -+ BUG_ON(!cfqq->allocated[rw]); -+ cfqq->allocated[rw]--; - -- BUG_ON(!cfqq->allocated[crq->is_write]); -- cfqq->allocated[crq->is_write]--; -+ put_io_context(crq->io_context->ioc); - - mempool_free(crq, cfqd->crq_pool); - rq->elevator_private = NULL; - -- smp_mb(); - cfq_check_waiters(q, cfqq); - cfq_put_queue(cfqq); - } - } - - /* -- * Allocate cfq data structures associated with this request. A queue and -+ * Allocate cfq data structures associated with this request. 
- */ --static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) -+static int -+cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, -+ int gfp_mask) - { - struct cfq_data *cfqd = q->elevator->elevator_data; -+ struct task_struct *tsk = current; - struct cfq_io_context *cic; - const int rw = rq_data_dir(rq); -- struct cfq_queue *cfqq, *saved_cfqq; -+ pid_t key = cfq_queue_pid(tsk, rw); -+ struct cfq_queue *cfqq; - struct cfq_rq *crq; - unsigned long flags; - - might_sleep_if(gfp_mask & __GFP_WAIT); - -+ cic = cfq_get_io_context(cfqd, key, gfp_mask); -+ - spin_lock_irqsave(q->queue_lock, flags); - -- cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask); -- if (!cfqq) -- goto out_lock; -+ if (!cic) -+ goto queue_fail; - --repeat: -- if (cfqq->allocated[rw] >= cfqd->max_queued) -- goto out_lock; -+ if (!cic->cfqq) { -+ cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); -+ if (!cfqq) -+ goto queue_fail; -+ -+ cic->cfqq = cfqq; -+ } else -+ cfqq = cic->cfqq; - - cfqq->allocated[rw]++; -+ cfq_clear_cfqq_must_alloc(cfqq); -+ cfqd->rq_starved = 0; -+ atomic_inc(&cfqq->ref); - spin_unlock_irqrestore(q->queue_lock, flags); - -- /* -- * if hashing type has changed, the cfq_queue might change here. 
-- */ -- saved_cfqq = cfqq; -- cic = cfq_get_io_context(&cfqq, gfp_mask); -- if (!cic) -- goto err; -- -- /* -- * repeat allocation checks on queue change -- */ -- if (unlikely(saved_cfqq != cfqq)) { -- spin_lock_irqsave(q->queue_lock, flags); -- saved_cfqq->allocated[rw]--; -- goto repeat; -- } -- - crq = mempool_alloc(cfqd->crq_pool, gfp_mask); - if (crq) { - RB_CLEAR(&crq->rb_node); -@@ -1460,24 +2118,141 @@ - INIT_HLIST_NODE(&crq->hash); - crq->cfq_queue = cfqq; - crq->io_context = cic; -- crq->service_start = crq->queue_start = 0; -- crq->in_flight = crq->accounted = crq->is_sync = 0; -- crq->is_write = rw; -+ cfq_clear_crq_in_flight(crq); -+ cfq_clear_crq_in_driver(crq); -+ cfq_clear_crq_requeued(crq); -+ -+ if (rw == READ || process_sync(tsk)) -+ cfq_mark_crq_is_sync(crq); -+ else -+ cfq_clear_crq_is_sync(crq); -+ - rq->elevator_private = crq; -- cfqq->alloc_limit[rw] = 0; - return 0; - } - -- put_io_context(cic->ioc); --err: - spin_lock_irqsave(q->queue_lock, flags); - cfqq->allocated[rw]--; -+ if (!(cfqq->allocated[0] + cfqq->allocated[1])) -+ cfq_mark_cfqq_must_alloc(cfqq); - cfq_put_queue(cfqq); --out_lock: -+queue_fail: -+ if (cic) -+ put_io_context(cic->ioc); -+ /* -+ * mark us rq allocation starved. we need to kickstart the process -+ * ourselves if there are no pending requests that can do it for us. 
-+ * that would be an extremely rare OOM situation -+ */ -+ cfqd->rq_starved = 1; -+ cfq_schedule_dispatch(cfqd); - spin_unlock_irqrestore(q->queue_lock, flags); - return 1; - } - -+static void cfq_kick_queue(void *data) -+{ -+ request_queue_t *q = data; -+ struct cfq_data *cfqd = q->elevator->elevator_data; -+ unsigned long flags; -+ -+ spin_lock_irqsave(q->queue_lock, flags); -+ -+ if (cfqd->rq_starved) { -+ struct request_list *rl = &q->rq; -+ -+ /* -+ * we aren't guaranteed to get a request after this, but we -+ * have to be opportunistic -+ */ -+ smp_mb(); -+ if (waitqueue_active(&rl->wait[READ])) -+ wake_up(&rl->wait[READ]); -+ if (waitqueue_active(&rl->wait[WRITE])) -+ wake_up(&rl->wait[WRITE]); -+ } -+ -+ blk_remove_plug(q); -+ q->request_fn(q); -+ spin_unlock_irqrestore(q->queue_lock, flags); -+} -+ -+/* -+ * Timer running if the active_queue is currently idling inside its time slice -+ */ -+static void cfq_idle_slice_timer(unsigned long data) -+{ -+ struct cfq_data *cfqd = (struct cfq_data *) data; -+ struct cfq_queue *cfqq; -+ unsigned long flags; -+ -+ spin_lock_irqsave(cfqd->queue->queue_lock, flags); -+ -+ if ((cfqq = cfqd->active_queue) != NULL) { -+ unsigned long now = jiffies; -+ -+ /* -+ * expired -+ */ -+ if (time_after(now, cfqq->slice_end)) -+ goto expire; -+ -+ /* -+ * only expire and reinvoke request handler, if there are -+ * other queues with pending requests -+ */ -+ if (!cfq_pending_requests(cfqd)) { -+ cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); -+ add_timer(&cfqd->idle_slice_timer); -+ goto out_cont; -+ } -+ -+ /* -+ * not expired and it has a request pending, let it dispatch -+ */ -+ if (!RB_EMPTY(&cfqq->sort_list)) { -+ cfq_mark_cfqq_must_dispatch(cfqq); -+ goto out_kick; -+ } -+ } -+expire: -+ cfq_slice_expired(cfqd, 0); -+out_kick: -+ cfq_schedule_dispatch(cfqd); -+out_cont: -+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); -+} -+ -+/* -+ * Timer running if an idle class queue is 
waiting for service -+ */ -+static void cfq_idle_class_timer(unsigned long data) -+{ -+ struct cfq_data *cfqd = (struct cfq_data *) data; -+ unsigned long flags, end; -+ -+ spin_lock_irqsave(cfqd->queue->queue_lock, flags); -+ -+ /* -+ * race with a non-idle queue, reset timer -+ */ -+ end = cfqd->last_end_request + CFQ_IDLE_GRACE; -+ if (!time_after_eq(jiffies, end)) { -+ cfqd->idle_class_timer.expires = end; -+ add_timer(&cfqd->idle_class_timer); -+ } else -+ cfq_schedule_dispatch(cfqd); -+ -+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); -+} -+ -+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) -+{ -+ del_timer_sync(&cfqd->idle_slice_timer); -+ del_timer_sync(&cfqd->idle_class_timer); -+ blk_sync_queue(cfqd->queue); -+} -+ - static void cfq_put_cfqd(struct cfq_data *cfqd) - { - request_queue_t *q = cfqd->queue; -@@ -1485,6 +2260,7 @@ - if (!atomic_dec_and_test(&cfqd->ref)) - return; - -+ cfq_shutdown_timer_wq(cfqd); - blk_put_queue(q); - - mempool_destroy(cfqd->crq_pool); -@@ -1495,7 +2271,10 @@ - - static void cfq_exit_queue(elevator_t *e) - { -- cfq_put_cfqd(e->elevator_data); -+ struct cfq_data *cfqd = e->elevator_data; -+ -+ cfq_shutdown_timer_wq(cfqd); -+ cfq_put_cfqd(cfqd); - } - - static int cfq_init_queue(request_queue_t *q, elevator_t *e) -@@ -1508,7 +2287,13 @@ - return -ENOMEM; - - memset(cfqd, 0, sizeof(*cfqd)); -- INIT_LIST_HEAD(&cfqd->rr_list); -+ -+ for (i = 0; i < CFQ_PRIO_LISTS; i++) -+ INIT_LIST_HEAD(&cfqd->rr_list[i]); -+ -+ INIT_LIST_HEAD(&cfqd->busy_rr); -+ INIT_LIST_HEAD(&cfqd->cur_rr); -+ INIT_LIST_HEAD(&cfqd->idle_rr); - INIT_LIST_HEAD(&cfqd->empty_list); - - cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); -@@ -1533,24 +2318,32 @@ - cfqd->queue = q; - atomic_inc(&q->refcnt); - -- /* -- * just set it to some high value, we want anyone to be able to queue -- * some requests. 
fairness is handled differently -- */ -- q->nr_requests = 1024; -- cfqd->max_queued = q->nr_requests / 16; -+ cfqd->max_queued = q->nr_requests / 4; - q->nr_batching = cfq_queued; -- cfqd->key_type = CFQ_KEY_TGID; -- cfqd->find_best_crq = 1; -+ -+ init_timer(&cfqd->idle_slice_timer); -+ cfqd->idle_slice_timer.function = cfq_idle_slice_timer; -+ cfqd->idle_slice_timer.data = (unsigned long) cfqd; -+ -+ init_timer(&cfqd->idle_class_timer); -+ cfqd->idle_class_timer.function = cfq_idle_class_timer; -+ cfqd->idle_class_timer.data = (unsigned long) cfqd; -+ -+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); -+ - atomic_set(&cfqd->ref, 1); - - cfqd->cfq_queued = cfq_queued; - cfqd->cfq_quantum = cfq_quantum; -- cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r; -- cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w; -- cfqd->cfq_fifo_batch_expire = cfq_fifo_rate; -+ cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; -+ cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; - cfqd->cfq_back_max = cfq_back_max; - cfqd->cfq_back_penalty = cfq_back_penalty; -+ cfqd->cfq_slice[0] = cfq_slice_async; -+ cfqd->cfq_slice[1] = cfq_slice_sync; -+ cfqd->cfq_slice_async_rq = cfq_slice_async_rq; -+ cfqd->cfq_slice_idle = cfq_slice_idle; -+ cfqd->cfq_max_depth = cfq_max_depth; - - return 0; - out_crqpool: -@@ -1595,7 +2388,6 @@ - return -ENOMEM; - } - -- - /* - * sysfs parts below --> - */ -@@ -1620,45 +2412,6 @@ - return count; - } - --static ssize_t --cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count) --{ -- max_elapsed_dispatch = max_elapsed_crq = 0; -- return count; --} -- --static ssize_t --cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count) --{ -- spin_lock_irq(cfqd->queue->queue_lock); -- if (!strncmp(page, "pgid", 4)) -- cfqd->key_type = CFQ_KEY_PGID; -- else if (!strncmp(page, "tgid", 4)) -- cfqd->key_type = CFQ_KEY_TGID; -- else if (!strncmp(page, "uid", 3)) -- cfqd->key_type = CFQ_KEY_UID; -- else if (!strncmp(page, "gid", 3)) -- cfqd->key_type = 
CFQ_KEY_GID; -- spin_unlock_irq(cfqd->queue->queue_lock); -- return count; --} -- --static ssize_t --cfq_read_key_type(struct cfq_data *cfqd, char *page) --{ -- ssize_t len = 0; -- int i; -- -- for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) { -- if (cfqd->key_type == i) -- len += sprintf(page+len, "[%s] ", cfq_key_types[i]); -- else -- len += sprintf(page+len, "%s ", cfq_key_types[i]); -- } -- len += sprintf(page+len, "\n"); -- return len; --} -- - #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ - static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ - { \ -@@ -1669,12 +2422,15 @@ - } - SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); - SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); --SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1); --SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1); --SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1); --SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0); -+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); -+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); - SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); - SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); -+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); -+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); -+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); -+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); -+SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); - #undef SHOW_FUNCTION - - #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -@@ -1694,12 +2450,15 @@ - } - STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); - STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); --STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1); --STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 
1); --STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1); --STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0); -+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); -+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); - STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); - STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); -+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); -+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); -+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); -+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); -+STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); - #undef STORE_FUNCTION - - static struct cfq_fs_entry cfq_quantum_entry = { -@@ -1712,25 +2471,15 @@ - .show = cfq_queued_show, - .store = cfq_queued_store, - }; --static struct cfq_fs_entry cfq_fifo_expire_r_entry = { -+static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { - .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, -- .show = cfq_fifo_expire_r_show, -- .store = cfq_fifo_expire_r_store, -+ .show = cfq_fifo_expire_sync_show, -+ .store = cfq_fifo_expire_sync_store, - }; --static struct cfq_fs_entry cfq_fifo_expire_w_entry = { -+static struct cfq_fs_entry cfq_fifo_expire_async_entry = { - .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, -- .show = cfq_fifo_expire_w_show, -- .store = cfq_fifo_expire_w_store, --}; --static struct cfq_fs_entry cfq_fifo_batch_expire_entry = { -- .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR }, -- .show = cfq_fifo_batch_expire_show, -- .store = cfq_fifo_batch_expire_store, --}; --static struct cfq_fs_entry cfq_find_best_entry = { -- .attr = {.name = "find_best_crq", .mode = S_IRUGO | 
S_IWUSR }, -- .show = cfq_find_best_show, -- .store = cfq_find_best_store, -+ .show = cfq_fifo_expire_async_show, -+ .store = cfq_fifo_expire_async_store, - }; - static struct cfq_fs_entry cfq_back_max_entry = { - .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, -@@ -1742,27 +2491,44 @@ - .show = cfq_back_penalty_show, - .store = cfq_back_penalty_store, - }; --static struct cfq_fs_entry cfq_clear_elapsed_entry = { -- .attr = {.name = "clear_elapsed", .mode = S_IWUSR }, -- .store = cfq_clear_elapsed, --}; --static struct cfq_fs_entry cfq_key_type_entry = { -- .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR }, -- .show = cfq_read_key_type, -- .store = cfq_set_key_type, -+static struct cfq_fs_entry cfq_slice_sync_entry = { -+ .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, -+ .show = cfq_slice_sync_show, -+ .store = cfq_slice_sync_store, -+}; -+static struct cfq_fs_entry cfq_slice_async_entry = { -+ .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, -+ .show = cfq_slice_async_show, -+ .store = cfq_slice_async_store, -+}; -+static struct cfq_fs_entry cfq_slice_async_rq_entry = { -+ .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, -+ .show = cfq_slice_async_rq_show, -+ .store = cfq_slice_async_rq_store, -+}; -+static struct cfq_fs_entry cfq_slice_idle_entry = { -+ .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, -+ .show = cfq_slice_idle_show, -+ .store = cfq_slice_idle_store, -+}; -+static struct cfq_fs_entry cfq_max_depth_entry = { -+ .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, -+ .show = cfq_max_depth_show, -+ .store = cfq_max_depth_store, - }; - - static struct attribute *default_attrs[] = { - &cfq_quantum_entry.attr, - &cfq_queued_entry.attr, -- &cfq_fifo_expire_r_entry.attr, -- &cfq_fifo_expire_w_entry.attr, -- &cfq_fifo_batch_expire_entry.attr, -- &cfq_key_type_entry.attr, -- &cfq_find_best_entry.attr, -+ &cfq_fifo_expire_sync_entry.attr, -+ &cfq_fifo_expire_async_entry.attr, - 
&cfq_back_max_entry.attr, - &cfq_back_penalty_entry.attr, -- &cfq_clear_elapsed_entry.attr, -+ &cfq_slice_sync_entry.attr, -+ &cfq_slice_async_entry.attr, -+ &cfq_slice_async_rq_entry.attr, -+ &cfq_slice_idle_entry.attr, -+ &cfq_max_depth_entry.attr, - NULL, - }; - -@@ -1832,21 +2598,46 @@ - { - int ret; - -+ /* -+ * could be 0 on HZ < 1000 setups -+ */ -+ if (!cfq_slice_async) -+ cfq_slice_async = 1; -+ if (!cfq_slice_idle) -+ cfq_slice_idle = 1; -+ - if (cfq_slab_setup()) - return -ENOMEM; - - ret = elv_register(&iosched_cfq); -- if (!ret) { -- __module_get(THIS_MODULE); -- return 0; -- } -+ if (ret) -+ cfq_slab_kill(); - -- cfq_slab_kill(); - return ret; - } - - static void __exit cfq_exit(void) - { -+ struct task_struct *g, *p; -+ unsigned long flags; -+ -+ read_lock_irqsave(&tasklist_lock, flags); -+ -+ /* -+ * iterate each process in the system, removing our io_context -+ */ -+ do_each_thread(g, p) { -+ struct io_context *ioc = p->io_context; -+ -+ if (ioc && ioc->cic) { -+ ioc->cic->exit(ioc->cic); -+ cfq_free_io_context(ioc->cic); -+ ioc->cic = NULL; -+ } -+ } while_each_thread(g, p); -+ -+ read_unlock_irqrestore(&tasklist_lock, flags); -+ - cfq_slab_kill(); - elv_unregister(&iosched_cfq); - } -diff -Naur 2.6.12-5.0-org/drivers/block/deadline-iosched.c 2.6.12-5.0-patched/drivers/block/deadline-iosched.c ---- 2.6.12-5.0-org/drivers/block/deadline-iosched.c 2007-07-26 00:53:20.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/block/deadline-iosched.c 2007-12-11 12:34:52.000000000 +0100 -@@ -758,7 +758,8 @@ - } - - static int --deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask) -+deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, -+ int gfp_mask) - { - struct deadline_data *dd = q->elevator->elevator_data; - struct deadline_rq *drq; -diff -Naur 2.6.12-5.0-org/drivers/block/elevator.c 2.6.12-5.0-patched/drivers/block/elevator.c ---- 2.6.12-5.0-org/drivers/block/elevator.c 2007-07-26 00:53:20.000000000 +0200 -+++ 
2.6.12-5.0-patched/drivers/block/elevator.c 2007-12-11 12:34:52.000000000 +0100 -@@ -158,27 +158,6 @@ - - static char chosen_elevator[16]; - --static void elevator_setup_default(void) --{ -- /* -- * check if default is set and exists -- */ -- if (chosen_elevator[0] && elevator_find(chosen_elevator)) -- return; -- --#if defined(CONFIG_IOSCHED_AS) -- strcpy(chosen_elevator, "anticipatory"); --#elif defined(CONFIG_IOSCHED_DEADLINE) -- strcpy(chosen_elevator, "deadline"); --#elif defined(CONFIG_IOSCHED_CFQ) -- strcpy(chosen_elevator, "cfq"); --#elif defined(CONFIG_IOSCHED_NOOP) -- strcpy(chosen_elevator, "noop"); --#else --#error "You must build at least 1 IO scheduler into the kernel" --#endif --} -- - static int __init elevator_setup(char *str) - { - strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); -@@ -193,15 +172,17 @@ - struct elevator_queue *eq; - int ret = 0; - -- elevator_setup_default(); -- -- if (!name) -- name = chosen_elevator; -- -- e = elevator_get(name); -- if (!e) -+ if (name && !(e = elevator_get(name))) - return -EINVAL; -- -+ -+ if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator))) -+ printk("I/O scheduler %s not found\n", chosen_elevator); -+ -+ if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) { -+ printk("Default I/O scheduler not found, using no+op\n"); -+ e = elevator_get("noop"); -+ } -+ - eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); - if (!eq) { - elevator_put(e->elevator_type); -@@ -480,12 +461,13 @@ - return NULL; - } - --int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask) -+int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, -+ int gfp_mask) - { - elevator_t *e = q->elevator; - - if (e->ops->elevator_set_req_fn) -- return e->ops->elevator_set_req_fn(q, rq, gfp_mask); -+ return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask); - - rq->elevator_private = NULL; - return 0; -@@ -499,12 +481,12 @@ - e->ops->elevator_put_req_fn(q, rq); - } - --int 
elv_may_queue(request_queue_t *q, int rw) -+int elv_may_queue(request_queue_t *q, int rw, struct bio *bio) - { - elevator_t *e = q->elevator; - - if (e->ops->elevator_may_queue_fn) -- return e->ops->elevator_may_queue_fn(q, rw); -+ return e->ops->elevator_may_queue_fn(q, rw, bio); - - return ELV_MQUEUE_MAY; - } -diff -Naur 2.6.12-5.0-org/drivers/block/Kconfig.iosched 2.6.12-5.0-patched/drivers/block/Kconfig.iosched ---- 2.6.12-5.0-org/drivers/block/Kconfig.iosched 2007-07-26 00:53:20.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/block/Kconfig.iosched 2007-12-11 12:34:52.000000000 +0100 -@@ -38,4 +38,32 @@ - among all processes in the system. It should provide a fair - working environment, suitable for desktop systems. - -+choice -+ prompt "Default I/O scheduler" -+ default DEFAULT_AS -+ help -+ Select the I/O scheduler which will be used by default for all -+ block devices. -+ -+ config DEFAULT_AS -+ bool "Anticipatory" if IOSCHED_AS=y -+ -+ config DEFAULT_DEADLINE -+ bool "Deadline" if IOSCHED_DEADLINE=y -+ -+ config DEFAULT_CFQ -+ bool "CFQ" if IOSCHED_CFQ=y -+ -+ config DEFAULT_NOOP -+ bool "No-op" -+ -+endchoice -+ -+config DEFAULT_IOSCHED -+ string -+ default "anticipatory" if DEFAULT_AS -+ default "deadline" if DEFAULT_DEADLINE -+ default "cfq" if DEFAULT_CFQ -+ default "noop" if DEFAULT_NOOP -+ - endmenu -diff -Naur 2.6.12-5.0-org/drivers/block/ll_rw_blk.c 2.6.12-5.0-patched/drivers/block/ll_rw_blk.c ---- 2.6.12-5.0-org/drivers/block/ll_rw_blk.c 2007-07-26 00:53:21.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/block/ll_rw_blk.c 2007-12-11 12:37:54.000000000 +0100 -@@ -287,6 +287,7 @@ - rq->errors = 0; - rq->rq_status = RQ_ACTIVE; - rq->bio = rq->biotail = NULL; -+ rq->ioprio = 0; - rq->buffer = NULL; - rq->ref_count = 1; - rq->q = q; -@@ -1522,11 +1523,7 @@ - if (!blk_remove_plug(q)) - return; - -- /* -- * was plugged, fire request_fn if queue has stuff to do -- */ -- if (elv_next_request(q)) -- q->request_fn(q); -+ q->request_fn(q); - } - 
EXPORT_SYMBOL(__generic_unplug_device); - -@@ -1841,8 +1838,8 @@ - mempool_free(rq, q->rq.rq_pool); - } - --static inline struct request *blk_alloc_request(request_queue_t *q, int rw, -- int gfp_mask) -+static inline struct request * -+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) - { - struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); - -@@ -1855,7 +1852,7 @@ - */ - rq->flags = rw; - -- if (!elv_set_request(q, rq, gfp_mask)) -+ if (!elv_set_request(q, rq, bio, gfp_mask)) - return rq; - - mempool_free(rq, q->rq.rq_pool); -@@ -1938,7 +1935,8 @@ - /* - * Get a free request, queue_lock must not be held - */ --static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) -+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, -+ int gfp_mask) - { - struct request *rq = NULL; - struct request_list *rl = &q->rq; -@@ -1961,7 +1959,7 @@ - } - } - -- switch (elv_may_queue(q, rw)) { -+ switch (elv_may_queue(q, rw, bio)) { - case ELV_MQUEUE_NO: - goto rq_starved; - case ELV_MQUEUE_MAY: -@@ -1986,7 +1984,7 @@ - set_queue_congested(q, rw); - spin_unlock_irq(q->queue_lock); - -- rq = blk_alloc_request(q, rw, gfp_mask); -+ rq = blk_alloc_request(q, rw, bio, gfp_mask); - if (!rq) { - /* - * Allocation failed presumably due to memory. Undo anything -@@ -2027,7 +2025,8 @@ - * No available requests for this queue, unplug the device and wait for some - * requests to become available. 
- */ --static struct request *get_request_wait(request_queue_t *q, int rw) -+static struct request *get_request_wait(request_queue_t *q, int rw, -+ struct bio *bio) - { - DEFINE_WAIT(wait); - struct request *rq; -@@ -2039,7 +2038,7 @@ - prepare_to_wait_exclusive(&rl->wait[rw], &wait, - TASK_UNINTERRUPTIBLE); - -- rq = get_request(q, rw, GFP_NOIO); -+ rq = get_request(q, rw, bio, GFP_NOIO); - - if (!rq) { - struct io_context *ioc; -@@ -2069,9 +2068,9 @@ - BUG_ON(rw != READ && rw != WRITE); - - if (gfp_mask & __GFP_WAIT) -- rq = get_request_wait(q, rw); -+ rq = get_request_wait(q, rw, NULL); - else -- rq = get_request(q, rw, gfp_mask); -+ rq = get_request(q, rw, NULL, gfp_mask); - - return rq; - } -@@ -2445,7 +2444,6 @@ - return; - - req->rq_status = RQ_INACTIVE; -- req->q = NULL; - req->rl = NULL; - - /* -@@ -2583,6 +2581,8 @@ - req->rq_disk->in_flight--; - } - -+ req->ioprio = ioprio_best(req->ioprio, next->ioprio); -+ - __blk_put_request(q, next); - return 1; - } -@@ -2645,11 +2645,13 @@ - { - struct request *req, *freereq = NULL; - int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; -+ unsigned short prio; - sector_t sector; - - sector = bio->bi_sector; - nr_sectors = bio_sectors(bio); - cur_nr_sectors = bio_cur_sectors(bio); -+ prio = bio_prio(bio); - - rw = bio_data_dir(bio); - sync = bio_sync(bio); -@@ -2696,6 +2698,7 @@ - set_bit(__REQ_DIRECTIO, &req->flags); - } - #endif -+ req->ioprio = ioprio_best(req->ioprio, prio); - drive_stat_acct(req, nr_sectors, 0); - if (!attempt_back_merge(q, req)) - elv_merged_request(q, req); -@@ -2726,6 +2729,7 @@ - set_bit(__REQ_DIRECTIO, &req->flags); - } - #endif -+ req->ioprio = ioprio_best(req->ioprio, prio); - drive_stat_acct(req, nr_sectors, 0); - if (!attempt_front_merge(q, req)) - elv_merged_request(q, req); -@@ -2753,7 +2757,7 @@ - freereq = NULL; - } else { - spin_unlock_irq(q->queue_lock); -- if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) { -+ if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) 
== NULL) { - /* - * READA bit set - */ -@@ -2761,7 +2765,7 @@ - if (bio_rw_ahead(bio)) - goto end_io; - -- freereq = get_request_wait(q, rw); -+ freereq = get_request_wait(q, rw, bio); - } - goto again; - } -@@ -2789,6 +2793,7 @@ - req->buffer = bio_data(bio); /* see ->buffer comment above */ - req->waiting = NULL; - req->bio = req->biotail = bio; -+ req->ioprio = prio; - req->rq_disk = bio->bi_bdev->bd_disk; - req->start_time = jiffies; - #if defined (CONFIG_MIPS_BCM7440) -@@ -2821,7 +2826,7 @@ - if (bdev != bdev->bd_contains) { - struct hd_struct *p = bdev->bd_part; - -- switch (bio->bi_rw) { -+ switch (bio_data_dir(bio)) { - case READ: - p->read_sectors += bio_sectors(bio); - p->reads++; -@@ -2840,6 +2845,7 @@ - { - struct request_list *rl = &q->rq; - struct request *rq; -+ int requeued = 0; - - spin_lock_irq(q->queue_lock); - clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); -@@ -2848,9 +2854,13 @@ - rq = list_entry_rq(q->drain_list.next); - - list_del_init(&rq->queuelist); -- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); -+ elv_requeue_request(q, rq); -+ requeued++; - } - -+ if (requeued) -+ q->request_fn(q); -+ - spin_unlock_irq(q->queue_lock); - - wake_up(&rl->wait[0]); -@@ -3056,7 +3066,7 @@ - - BIO_BUG_ON(!bio->bi_size); - BIO_BUG_ON(!bio->bi_io_vec); -- bio->bi_rw = rw; -+ bio->bi_rw |= rw; - if (rw & WRITE) - mod_page_state(pgpgout, count); - else -@@ -3418,8 +3428,11 @@ - struct io_context *ioc; - - local_irq_save(flags); -+ task_lock(current); - ioc = current->io_context; - current->io_context = NULL; -+ ioc->task = NULL; -+ task_unlock(current); - local_irq_restore(flags); - - if (ioc->aic && ioc->aic->exit) -@@ -3454,12 +3467,12 @@ - ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); - if (ret) { - atomic_set(&ret->refcount, 1); -- ret->pid = tsk->pid; -+ ret->task = current; -+ ret->set_ioprio = NULL; - ret->last_waited = jiffies; /* doesn't matter... 
*/ - ret->nr_batch_requests = 0; /* because this is 0 */ - ret->aic = NULL; - ret->cic = NULL; -- spin_lock_init(&ret->lock); - - local_irq_save(flags); - -diff -Naur 2.6.12-5.0-org/drivers/char/brcmserial.c 2.6.12-5.0-patched/drivers/char/brcmserial.c ---- 2.6.12-5.0-org/drivers/char/brcmserial.c 2007-07-26 00:53:27.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/char/brcmserial.c 2007-12-11 12:34:52.000000000 +0100 -@@ -1208,7 +1208,7 @@ - 600, 1200, 1800, 2400, 4800, 9600, 19200, - 38400, 57600, 115200, 230400, 460800, 0 }; - --static int tty_get_baud_rate(struct tty_struct *tty) -+int tty_get_baud_rate(struct tty_struct *tty) - { - struct async_struct * info = (struct async_struct *)tty->driver_data; - unsigned int cflag, i; -diff -Naur 2.6.12-5.0-org/drivers/char/keyboard.c 2.6.12-5.0-patched/drivers/char/keyboard.c ---- 2.6.12-5.0-org/drivers/char/keyboard.c 2007-07-26 00:53:29.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/char/keyboard.c 2007-12-11 12:34:52.000000000 +0100 -@@ -1186,6 +1186,9 @@ - for (i = KEY_RESERVED; i < BTN_MISC; i++) - if (test_bit(i, dev->keybit)) break; - -+ if ( test_bit(EV_NO_CONSOLE, dev->evbit) ) -+ return NULL; -+ - if ((i == BTN_MISC) && !test_bit(EV_SND, dev->evbit)) - return NULL; - -diff -Naur 2.6.12-5.0-org/drivers/char/tty_io.c 2.6.12-5.0-patched/drivers/char/tty_io.c ---- 2.6.12-5.0-org/drivers/char/tty_io.c 2007-07-26 00:53:30.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/char/tty_io.c 2007-12-11 12:34:52.000000000 +0100 -@@ -2592,6 +2592,7 @@ - * flags may be updated. 
- */ - -+#if 0 - int tty_get_baud_rate(struct tty_struct *tty) - { - int baud = tty_termios_baud_rate(tty->termios); -@@ -2607,6 +2608,7 @@ - - return baud; - } -+#endif - - EXPORT_SYMBOL(tty_get_baud_rate); - -diff -Naur 2.6.12-5.0-org/drivers/video/Kconfig 2.6.12-5.0-patched/drivers/video/Kconfig ---- 2.6.12-5.0-org/drivers/video/Kconfig 2007-07-26 00:54:49.000000000 +0200 -+++ 2.6.12-5.0-patched/drivers/video/Kconfig 2007-12-11 12:34:52.000000000 +0100 -@@ -39,7 +39,7 @@ - device-aware may cause unexpected results. If unsure, say N. - - config FB_CFB_FILLRECT -- tristate -+ tristate "FB_CFB_FILLRECT" - depends on FB - default n - ---help--- -@@ -48,7 +48,7 @@ - (accelerated) version. - - config FB_CFB_COPYAREA -- tristate -+ tristate "FB_CFB_COPYAREA" - depends on FB - default n - ---help--- -@@ -57,7 +57,7 @@ - version. - - config FB_CFB_IMAGEBLIT -- tristate -+ tristate "FB_CFB_IMAGEBLIT" - depends on FB - default n - ---help--- -@@ -66,7 +66,7 @@ - (accelerated) version. - - config FB_SOFT_CURSOR -- tristate -+ tristate "FB_SOFT_CURSOR" - depends on FB - default n - ---help--- -diff -Naur 2.6.12-5.0-org/fs/ioprio.c 2.6.12-5.0-patched/fs/ioprio.c ---- 2.6.12-5.0-org/fs/ioprio.c 1970-01-01 01:00:00.000000000 +0100 -+++ 2.6.12-5.0-patched/fs/ioprio.c 2007-12-11 12:34:52.000000000 +0100 -@@ -0,0 +1,172 @@ -+/* -+ * fs/ioprio.c -+ * -+ * Copyright (C) 2004 Jens Axboe <axboe@suse.de> -+ * -+ * Helper functions for setting/querying io priorities of processes. The -+ * system calls closely mimmick getpriority/setpriority, see the man page for -+ * those. The prio argument is a composite of prio class and prio data, where -+ * the data argument has meaning within that class. The standard scheduling -+ * classes have 8 distinct prio levels, with 0 being the highest prio and 7 -+ * being the lowest. 
-+ * -+ * IOW, setting BE scheduling class with prio 2 is done ala: -+ * -+ * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2; -+ * -+ * ioprio_set(PRIO_PROCESS, pid, prio); -+ * -+ * See also Documentation/block/ioprio.txt -+ * -+ */ -+#include <linux/kernel.h> -+#include <linux/ioprio.h> -+#include <linux/blkdev.h> -+ -+static int set_task_ioprio(struct task_struct *task, int ioprio) -+{ -+ struct io_context *ioc; -+ -+ if (task->uid != current->euid && -+ task->uid != current->uid && !capable(CAP_SYS_NICE)) -+ return -EPERM; -+ -+ task_lock(task); -+ -+ task->ioprio = ioprio; -+ -+ ioc = task->io_context; -+ if (ioc && ioc->set_ioprio) -+ ioc->set_ioprio(ioc, ioprio); -+ -+ task_unlock(task); -+ return 0; -+} -+ -+asmlinkage int sys_ioprio_set(int which, int who, int ioprio) -+{ -+ int class = IOPRIO_PRIO_CLASS(ioprio); -+ int data = IOPRIO_PRIO_DATA(ioprio); -+ struct task_struct *p, *g; -+ struct user_struct *user; -+ int ret; -+ -+ switch (class) { -+ case IOPRIO_CLASS_RT: -+ if (!capable(CAP_SYS_ADMIN)) -+ return -EPERM; -+ /* fall through, rt has prio field too */ -+ case IOPRIO_CLASS_BE: -+ if (data >= IOPRIO_BE_NR || data < 0) -+ return -EINVAL; -+ -+ break; -+ case IOPRIO_CLASS_IDLE: -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ ret = -ESRCH; -+ read_lock_irq(&tasklist_lock); -+ switch (which) { -+ case IOPRIO_WHO_PROCESS: -+ if (!who) -+ p = current; -+ else -+ p = find_task_by_pid(who); -+ if (p) -+ ret = set_task_ioprio(p, ioprio); -+ break; -+ case IOPRIO_WHO_PGRP: -+ if (!who) -+ who = process_group(current); -+ do_each_task_pid(who, PIDTYPE_PGID, p) { -+ ret = set_task_ioprio(p, ioprio); -+ if (ret) -+ break; -+ } while_each_task_pid(who, PIDTYPE_PGID, p); -+ break; -+ case IOPRIO_WHO_USER: -+ if (!who) -+ user = current->user; -+ else -+ user = find_user(who); -+ -+ if (!user) -+ break; -+ -+ do_each_thread(g, p) { -+ if (p->uid != who) -+ continue; -+ ret = set_task_ioprio(p, ioprio); -+ if (ret) -+ break; -+ } 
while_each_thread(g, p); -+ -+ if (who) -+ free_uid(user); -+ break; -+ default: -+ ret = -EINVAL; -+ } -+ -+ read_unlock_irq(&tasklist_lock); -+ return ret; -+} -+ -+asmlinkage int sys_ioprio_get(int which, int who) -+{ -+ struct task_struct *g, *p; -+ struct user_struct *user; -+ int ret = -ESRCH; -+ -+ read_lock_irq(&tasklist_lock); -+ switch (which) { -+ case IOPRIO_WHO_PROCESS: -+ if (!who) -+ p = current; -+ else -+ p = find_task_by_pid(who); -+ if (p) -+ ret = p->ioprio; -+ break; -+ case IOPRIO_WHO_PGRP: -+ if (!who) -+ who = process_group(current); -+ do_each_task_pid(who, PIDTYPE_PGID, p) { -+ if (ret == -ESRCH) -+ ret = p->ioprio; -+ else -+ ret = ioprio_best(ret, p->ioprio); -+ } while_each_task_pid(who, PIDTYPE_PGID, p); -+ break; -+ case IOPRIO_WHO_USER: -+ if (!who) -+ user = current->user; -+ else -+ user = find_user(who); -+ -+ if (!user) -+ break; -+ -+ do_each_thread(g, p) { -+ if (p->uid != user->uid) -+ continue; -+ if (ret == -ESRCH) -+ ret = p->ioprio; -+ else -+ ret = ioprio_best(ret, p->ioprio); -+ } while_each_thread(g, p); -+ -+ if (who) -+ free_uid(user); -+ break; -+ default: -+ ret = -EINVAL; -+ } -+ -+ read_unlock_irq(&tasklist_lock); -+ return ret; -+} -+ -diff -Naur 2.6.12-5.0-org/fs/Makefile 2.6.12-5.0-patched/fs/Makefile ---- 2.6.12-5.0-org/fs/Makefile 2007-07-26 00:55:01.000000000 +0200 -+++ 2.6.12-5.0-patched/fs/Makefile 2007-12-11 12:34:52.000000000 +0100 -@@ -10,6 +10,7 @@ - ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ - attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ - seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ -+ ioprio.o - - obj-$(CONFIG_EPOLL) += eventpoll.o - obj-$(CONFIG_COMPAT) += compat.o -diff -Naur 2.6.12-5.0-org/fs/reiserfs/journal.c 2.6.12-5.0-patched/fs/reiserfs/journal.c ---- 2.6.12-5.0-org/fs/reiserfs/journal.c 2007-07-26 00:55:15.000000000 +0200 -+++ 2.6.12-5.0-patched/fs/reiserfs/journal.c 2007-12-11 12:34:52.000000000 +0100 -@@ -645,18 +645,22 @@ - - static 
void write_chunk(struct buffer_chunk *chunk) { - int i; -+ get_fs_excl(); - for (i = 0; i < chunk->nr ; i++) { - submit_logged_buffer(chunk->bh[i]) ; - } - chunk->nr = 0; -+ put_fs_excl(); - } - - static void write_ordered_chunk(struct buffer_chunk *chunk) { - int i; -+ get_fs_excl(); - for (i = 0; i < chunk->nr ; i++) { - submit_ordered_buffer(chunk->bh[i]) ; - } - chunk->nr = 0; -+ put_fs_excl(); - } - - static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, -@@ -918,6 +922,8 @@ - return 0 ; - } - -+ get_fs_excl(); -+ - /* before we can put our commit blocks on disk, we have to make sure everyone older than - ** us is on disk too - */ -@@ -1055,6 +1061,7 @@ - - if (retval) - reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__); -+ put_fs_excl(); - return retval; - } - -@@ -1251,6 +1258,8 @@ - return 0 ; - } - -+ get_fs_excl(); -+ - /* if all the work is already done, get out of here */ - if (atomic_read(&(jl->j_nonzerolen)) <= 0 && - atomic_read(&(jl->j_commit_left)) <= 0) { -@@ -1450,6 +1459,7 @@ - put_journal_list(s, jl); - if (flushall) - up(&journal->j_flush_sem); -+ put_fs_excl(); - return err ; - } - -@@ -2717,6 +2727,7 @@ - th->t_trans_id = journal->j_trans_id ; - unlock_journal(p_s_sb) ; - INIT_LIST_HEAD (&th->t_list); -+ get_fs_excl(); - return 0 ; - - out_fail: -@@ -3524,6 +3535,7 @@ - BUG_ON (th->t_refcount > 1); - BUG_ON (!th->t_trans_id); - -+ put_fs_excl(); - current->journal_info = th->t_handle_save; - reiserfs_check_lock_depth(p_s_sb, "journal end"); - if (journal->j_len == 0) { -diff -Naur 2.6.12-5.0-org/include/asm-mips/futex.h 2.6.12-5.0-patched/include/asm-mips/futex.h ---- 2.6.12-5.0-org/include/asm-mips/futex.h 1970-01-01 01:00:00.000000000 +0100 -+++ 2.6.12-5.0-patched/include/asm-mips/futex.h 2007-12-11 12:34:52.000000000 +0100 -@@ -0,0 +1,134 @@ -+#ifndef _ASM_FUTEX_H -+#define _ASM_FUTEX_H -+ -+#ifdef __KERNEL__ -+ -+#include <linux/config.h> -+#include <linux/futex.h> -+#include <asm/errno.h> 
-+#include <asm/uaccess.h> -+#include <asm/war.h> -+ -+#ifdef CONFIG_SMP -+#define __FUTEX_SMP_SYNC " sync \n" -+#else -+#define __FUTEX_SMP_SYNC -+#endif -+ -+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ -+{ \ -+ if (cpu_has_llsc && R10000_LLSC_WAR) { \ -+ __asm__ __volatile__( \ -+ " .set push \n" \ -+ " .set noat \n" \ -+ " .set mips3 \n" \ -+ "1: ll %1, %4 # __futex_atomic_op \n" \ -+ " .set mips0 \n" \ -+ " " insn " \n" \ -+ " .set mips3 \n" \ -+ "2: sc $1, %2 \n" \ -+ " beqzl $1, 1b \n" \ -+ __FUTEX_SMP_SYNC \ -+ "3: \n" \ -+ " .set pop \n" \ -+ " .set mips0 \n" \ -+ " .section .fixup,\"ax\" \n" \ -+ "4: li %0, %6 \n" \ -+ " j 2b \n" \ -+ " .previous \n" \ -+ " .section __ex_table,\"a\" \n" \ -+ " "__UA_ADDR "\t1b, 4b \n" \ -+ " "__UA_ADDR "\t2b, 4b \n" \ -+ " .previous \n" \ -+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ -+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ -+ : "memory"); \ -+ } else if (cpu_has_llsc) { \ -+ __asm__ __volatile__( \ -+ " .set push \n" \ -+ " .set noat \n" \ -+ " .set mips3 \n" \ -+ "1: ll %1, %4 # __futex_atomic_op \n" \ -+ " .set mips0 \n" \ -+ " " insn " \n" \ -+ " .set mips3 \n" \ -+ "2: sc $1, %2 \n" \ -+ " beqz $1, 1b \n" \ -+ __FUTEX_SMP_SYNC \ -+ "3: \n" \ -+ " .set pop \n" \ -+ " .set mips0 \n" \ -+ " .section .fixup,\"ax\" \n" \ -+ "4: li %0, %6 \n" \ -+ " j 2b \n" \ -+ " .previous \n" \ -+ " .section __ex_table,\"a\" \n" \ -+ " "__UA_ADDR "\t1b, 4b \n" \ -+ " "__UA_ADDR "\t2b, 4b \n" \ -+ " .previous \n" \ -+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ -+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ -+ : "memory"); \ -+ } else \ -+ ret = -ENOSYS; \ -+} -+ -+static inline int -+futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -+{ -+ int op = (encoded_op >> 28) & 7; -+ int cmp = (encoded_op >> 24) & 15; -+ int oparg = (encoded_op << 8) >> 20; -+ int cmparg = (encoded_op << 20) >> 20; -+ int oldval = 0, ret; -+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) -+ oparg = 1 << 
oparg; -+ -+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) -+ return -EFAULT; -+ -+ inc_preempt_count(); -+ -+ switch (op) { -+ case FUTEX_OP_SET: -+ __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg); -+ break; -+ -+ case FUTEX_OP_ADD: -+ __futex_atomic_op("addu $1, %1, %z5", -+ ret, oldval, uaddr, oparg); -+ break; -+ case FUTEX_OP_OR: -+ __futex_atomic_op("or $1, %1, %z5", -+ ret, oldval, uaddr, oparg); -+ break; -+ case FUTEX_OP_ANDN: -+ __futex_atomic_op("and $1, %1, %z5", -+ ret, oldval, uaddr, ~oparg); -+ break; -+ case FUTEX_OP_XOR: -+ __futex_atomic_op("xor $1, %1, %z5", -+ ret, oldval, uaddr, oparg); -+ break; -+ default: -+ ret = -ENOSYS; -+ } -+ -+ dec_preempt_count(); -+ -+ if (!ret) { -+ switch (cmp) { -+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; -+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; -+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; -+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; -+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; -+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; -+ default: ret = -ENOSYS; -+ } -+ } -+ return ret; -+} -+ -+#endif -+#endif -diff -Naur 2.6.12-5.0-org/include/asm-mips/uaccess.h 2.6.12-5.0-patched/include/asm-mips/uaccess.h ---- 2.6.12-5.0-org/include/asm-mips/uaccess.h 2007-07-26 00:56:08.000000000 +0200 -+++ 2.6.12-5.0-patched/include/asm-mips/uaccess.h 2007-12-11 12:34:52.000000000 +0100 -@@ -234,39 +234,72 @@ - - #define __get_user_nocheck(x,ptr,size) \ - ({ \ -- __typeof(*(ptr)) __gu_val = (__typeof(*(ptr))) 0; \ - long __gu_err = 0; \ -- \ - might_sleep(); \ - switch (size) { \ -- case 1: __get_user_asm("lb", ptr); break; \ -- case 2: __get_user_asm("lh", ptr); break; \ -- case 4: __get_user_asm("lw", ptr); break; \ -- case 8: __GET_USER_DW(ptr); break; \ -+ case 1: { \ -+ s8 __gu_val = (s8) 0; \ -+ __get_user_asm("lb", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 2: { \ -+ s16 __gu_val = (s16) 0; \ -+ 
__get_user_asm("lh", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 4: { \ -+ s32 __gu_val = (s32) 0; \ -+ __get_user_asm("lw", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 8: { \ -+ s64 __gu_val = (s64) 0; \ -+ __GET_USER_DW(ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ - default: __get_user_unknown(); break; \ - } \ -- (x) = (__typeof__(*(ptr))) __gu_val; \ - __gu_err; \ - }) - - #define __get_user_check(x,ptr,size) \ - ({ \ - const __typeof__(*(ptr)) __user * __gu_addr = (ptr); \ -- __typeof__(*(ptr)) __gu_val = 0; \ - long __gu_err = -EFAULT; \ -- \ - might_sleep(); \ -- \ - if (likely(access_ok(VERIFY_READ, __gu_addr, size))) { \ - switch (size) { \ -- case 1: __get_user_asm("lb", __gu_addr); break; \ -- case 2: __get_user_asm("lh", __gu_addr); break; \ -- case 4: __get_user_asm("lw", __gu_addr); break; \ -- case 8: __GET_USER_DW(__gu_addr); break; \ -+ case 1: { \ -+ s8 __gu_val = (s8) 0; \ -+ __get_user_asm("lb", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 2: { \ -+ s16 __gu_val = (s16) 0; \ -+ __get_user_asm("lh", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 4: { \ -+ s32 __gu_val = (s32) 0; \ -+ __get_user_asm("lw", ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ -+ case 8: { \ -+ s64 __gu_val = (s64) 0; \ -+ __GET_USER_DW(ptr); \ -+ (x) = (__typeof__(*(ptr))) __gu_val; \ -+ break; \ -+ } \ - default: __get_user_unknown(); break; \ - } \ - } \ -- (x) = (__typeof__(*(ptr))) __gu_val; \ - __gu_err; \ - }) - -diff -Naur 2.6.12-5.0-org/include/asm-mips/unistd.h 2.6.12-5.0-patched/include/asm-mips/unistd.h ---- 2.6.12-5.0-org/include/asm-mips/unistd.h 2007-07-26 00:56:08.000000000 +0200 -+++ 2.6.12-5.0-patched/include/asm-mips/unistd.h 2007-12-11 12:34:52.000000000 +0100 -@@ -304,16 +304,18 @@ - #define __NR_request_key (__NR_Linux + 281) - #define __NR_keyctl (__NR_Linux + 282) - #define 
__NR_set_thread_area (__NR_Linux + 283) -+#define __NR_sys_ioprio_set (__NR_Linux + 284) -+#define __NR_sys_ioprio_get (__NR_Linux + 285) - - /* - * Offset of the last Linux o32 flavoured syscall - */ --#define __NR_Linux_syscalls 283 -+#define __NR_Linux_syscalls 285 - - #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ - - #define __NR_O32_Linux 4000 --#define __NR_O32_Linux_syscalls 283 -+#define __NR_O32_Linux_syscalls 285 - - #if _MIPS_SIM == _MIPS_SIM_ABI64 - -diff -Naur 2.6.12-5.0-org/include/linux/bio.h 2.6.12-5.0-patched/include/linux/bio.h ---- 2.6.12-5.0-org/include/linux/bio.h 2007-07-26 00:57:02.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/bio.h 2007-12-11 12:34:52.000000000 +0100 -@@ -22,6 +22,7 @@ - - #include <linux/highmem.h> - #include <linux/mempool.h> -+#include <linux/ioprio.h> - - /* Platforms may set this to teach the BIO layer about IOMMU hardware. */ - #include <asm/io.h> -@@ -153,6 +154,19 @@ - #define BIO_RW_SYNC 4 - - /* -+ * upper 16 bits of bi_rw define the io priority of this bio -+ */ -+#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) -+#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) -+#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) -+ -+#define bio_set_prio(bio, prio) do { \ -+ WARN_ON(prio >= (1 << IOPRIO_BITS)); \ -+ (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ -+ (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ -+} while (0) -+ -+/* - * various member access, note that bio_data should of course not be used - * on highmem page vectors - */ -diff -Naur 2.6.12-5.0-org/include/linux/blkdev.h 2.6.12-5.0-patched/include/linux/blkdev.h ---- 2.6.12-5.0-org/include/linux/blkdev.h 2007-07-26 00:57:02.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/blkdev.h 2007-12-11 12:34:52.000000000 +0100 -@@ -54,16 +54,23 @@ - - struct cfq_queue; - struct cfq_io_context { -- void (*dtor)(struct cfq_io_context *); -- void (*exit)(struct cfq_io_context *); -- -- struct io_context *ioc; -- - /* - * 
circular list of cfq_io_contexts belonging to a process io context - */ - struct list_head list; - struct cfq_queue *cfqq; -+ void *key; -+ -+ struct io_context *ioc; -+ -+ unsigned long last_end_request; -+ unsigned long last_queue; -+ unsigned long ttime_total; -+ unsigned long ttime_samples; -+ unsigned long ttime_mean; -+ -+ void (*dtor)(struct cfq_io_context *); -+ void (*exit)(struct cfq_io_context *); - }; - - /* -@@ -73,7 +80,9 @@ - */ - struct io_context { - atomic_t refcount; -- pid_t pid; -+ struct task_struct *task; -+ -+ int (*set_ioprio)(struct io_context *, unsigned int); - - /* - * For request batching -@@ -81,8 +90,6 @@ - unsigned long last_waited; /* Time last woken after wait for request */ - int nr_batch_requests; /* Number of requests left in the batch */ - -- spinlock_t lock; -- - struct as_io_context *aic; - struct cfq_io_context *cic; - }; -@@ -134,6 +141,8 @@ - - void *elevator_private; - -+ unsigned short ioprio; -+ - int rq_status; /* should split this into a few status bits */ - struct gendisk *rq_disk; - int errors; -diff -Naur 2.6.12-5.0-org/include/linux/elevator.h 2.6.12-5.0-patched/include/linux/elevator.h ---- 2.6.12-5.0-org/include/linux/elevator.h 2007-07-26 00:56:58.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/elevator.h 2007-12-11 12:34:52.000000000 +0100 -@@ -16,9 +16,9 @@ - typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *); - typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *); - typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *); --typedef int (elevator_may_queue_fn) (request_queue_t *, int); -+typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *); - --typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int); -+typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int); - typedef void (elevator_put_req_fn) (request_queue_t *, struct request *); - typedef 
void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); - -@@ -96,9 +96,9 @@ - extern struct request *elv_latter_request(request_queue_t *, struct request *); - extern int elv_register_queue(request_queue_t *q); - extern void elv_unregister_queue(request_queue_t *q); --extern int elv_may_queue(request_queue_t *, int); -+extern int elv_may_queue(request_queue_t *, int, struct bio *); - extern void elv_completed_request(request_queue_t *, struct request *); --extern int elv_set_request(request_queue_t *, struct request *, int); -+extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int); - extern void elv_put_request(request_queue_t *, struct request *); - - /* -diff -Naur 2.6.12-5.0-org/include/linux/fs.h 2.6.12-5.0-patched/include/linux/fs.h ---- 2.6.12-5.0-org/include/linux/fs.h 2007-07-26 00:57:01.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/fs.h 2007-12-11 12:34:52.000000000 +0100 -@@ -213,6 +213,7 @@ - #include <linux/radix-tree.h> - #include <linux/prio_tree.h> - #include <linux/init.h> -+#include <linux/sched.h> - - #include <asm/atomic.h> - #include <asm/semaphore.h> -@@ -820,16 +821,34 @@ - #define vfs_check_frozen(sb, level) \ - wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) - -+static inline void get_fs_excl(void) -+{ -+ atomic_inc(¤t->fs_excl); -+} -+ -+static inline void put_fs_excl(void) -+{ -+ atomic_dec(¤t->fs_excl); -+} -+ -+static inline int has_fs_excl(void) -+{ -+ return atomic_read(¤t->fs_excl); -+} -+ -+ - /* - * Superblock locking. 
- */ - static inline void lock_super(struct super_block * sb) - { -+ get_fs_excl(); - down(&sb->s_lock); - } - - static inline void unlock_super(struct super_block * sb) - { -+ put_fs_excl(); - up(&sb->s_lock); - } - -diff -Naur 2.6.12-5.0-org/include/linux/futex.h 2.6.12-5.0-patched/include/linux/futex.h ---- 2.6.12-5.0-org/include/linux/futex.h 2007-07-26 00:57:03.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/futex.h 2007-12-11 12:34:52.000000000 +0100 -@@ -4,14 +4,40 @@ - /* Second argument to futex syscall */ - - --#define FUTEX_WAIT (0) --#define FUTEX_WAKE (1) --#define FUTEX_FD (2) --#define FUTEX_REQUEUE (3) --#define FUTEX_CMP_REQUEUE (4) -+#define FUTEX_WAIT 0 -+#define FUTEX_WAKE 1 -+#define FUTEX_FD 2 -+#define FUTEX_REQUEUE 3 -+#define FUTEX_CMP_REQUEUE 4 -+#define FUTEX_WAKE_OP 5 - - long do_futex(unsigned long uaddr, int op, int val, - unsigned long timeout, unsigned long uaddr2, int val2, - int val3); - -+#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ -+#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ -+#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ -+#define FUTEX_OP_ANDN 3 /* *(int *)UADDR2 &= ~OPARG; */ -+#define FUTEX_OP_XOR 4 /* *(int *)UADDR2 ^= OPARG; */ -+ -+#define FUTEX_OP_OPARG_SHIFT 8 /* Use (1 << OPARG) instead of OPARG. 
*/ -+ -+#define FUTEX_OP_CMP_EQ 0 /* if (oldval == CMPARG) wake */ -+#define FUTEX_OP_CMP_NE 1 /* if (oldval != CMPARG) wake */ -+#define FUTEX_OP_CMP_LT 2 /* if (oldval < CMPARG) wake */ -+#define FUTEX_OP_CMP_LE 3 /* if (oldval <= CMPARG) wake */ -+#define FUTEX_OP_CMP_GT 4 /* if (oldval > CMPARG) wake */ -+#define FUTEX_OP_CMP_GE 5 /* if (oldval >= CMPARG) wake */ -+ -+/* FUTEX_WAKE_OP will perform atomically -+ int oldval = *(int *)UADDR2; -+ *(int *)UADDR2 = oldval OP OPARG; -+ if (oldval CMP CMPARG) -+ wake UADDR2; */ -+ -+#define FUTEX_OP(op, oparg, cmp, cmparg) \ -+ (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \ -+ | ((oparg & 0xfff) << 12) | (cmparg & 0xfff)) -+ - #endif -diff -Naur 2.6.12-5.0-org/include/linux/init_task.h 2.6.12-5.0-patched/include/linux/init_task.h ---- 2.6.12-5.0-org/include/linux/init_task.h 2007-07-26 00:56:58.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/init_task.h 2007-12-11 12:34:52.000000000 +0100 -@@ -81,6 +81,7 @@ - .mm = NULL, \ - .active_mm = &init_mm, \ - .run_list = LIST_HEAD_INIT(tsk.run_list), \ -+ .ioprio = 0, \ - .time_slice = HZ, \ - .tasks = LIST_HEAD_INIT(tsk.tasks), \ - .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ -@@ -111,6 +112,7 @@ - .switch_lock = SPIN_LOCK_UNLOCKED, \ - .journal_info = NULL, \ - .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ -+ .fs_excl = ATOMIC_INIT(0), \ - } - - -diff -Naur 2.6.12-5.0-org/include/linux/input.h 2.6.12-5.0-patched/include/linux/input.h ---- 2.6.12-5.0-org/include/linux/input.h 2007-07-26 00:56:59.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/input.h 2007-12-11 12:34:52.000000000 +0100 -@@ -92,6 +92,7 @@ - #define EV_FF 0x15 - #define EV_PWR 0x16 - #define EV_FF_STATUS 0x17 -+#define EV_NO_CONSOLE 0x1e - #define EV_MAX 0x1f - - /* -diff -Naur 2.6.12-5.0-org/include/linux/ioprio.h 2.6.12-5.0-patched/include/linux/ioprio.h ---- 2.6.12-5.0-org/include/linux/ioprio.h 1970-01-01 01:00:00.000000000 +0100 -+++ 2.6.12-5.0-patched/include/linux/ioprio.h 
2007-12-11 12:34:52.000000000 +0100 -@@ -0,0 +1,88 @@ -+#ifndef IOPRIO_H -+#define IOPRIO_H -+ -+#include <linux/sched.h> -+ -+/* -+ * Gives us 8 prio classes with 13-bits of data for each class -+ */ -+#define IOPRIO_BITS (16) -+#define IOPRIO_CLASS_SHIFT (13) -+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) -+ -+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) -+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) -+#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) -+ -+#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) -+ -+/* -+ * These are the io priority groups as implemented by CFQ. RT is the realtime -+ * class, it always gets premium service. BE is the best-effort scheduling -+ * class, the default for any process. IDLE is the idle scheduling class, it -+ * is only served when no one else is using the disk. -+ */ -+enum { -+ IOPRIO_CLASS_NONE, -+ IOPRIO_CLASS_RT, -+ IOPRIO_CLASS_BE, -+ IOPRIO_CLASS_IDLE, -+}; -+ -+/* -+ * 8 best effort priority levels are supported -+ */ -+#define IOPRIO_BE_NR (8) -+ -+asmlinkage int sys_ioprio_set(int, int, int); -+asmlinkage int sys_ioprio_get(int, int); -+ -+enum { -+ IOPRIO_WHO_PROCESS = 1, -+ IOPRIO_WHO_PGRP, -+ IOPRIO_WHO_USER, -+}; -+ -+/* -+ * if process has set io priority explicitly, use that. 
if not, convert -+ * the cpu scheduler nice value to an io priority -+ */ -+#define IOPRIO_NORM (4) -+static inline int task_ioprio(struct task_struct *task) -+{ -+ WARN_ON(!ioprio_valid(task->ioprio)); -+ return IOPRIO_PRIO_DATA(task->ioprio); -+} -+ -+static inline int task_nice_ioprio(struct task_struct *task) -+{ -+ return (task_nice(task) + 20) / 5; -+} -+ -+/* -+ * For inheritance, return the highest of the two given priorities -+ */ -+static inline int ioprio_best(unsigned short aprio, unsigned short bprio) -+{ -+ unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); -+ unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); -+ -+ if (!ioprio_valid(aprio)) -+ return bprio; -+ if (!ioprio_valid(bprio)) -+ return aprio; -+ -+ if (aclass == IOPRIO_CLASS_NONE) -+ aclass = IOPRIO_CLASS_BE; -+ if (bclass == IOPRIO_CLASS_NONE) -+ bclass = IOPRIO_CLASS_BE; -+ -+ if (aclass == bclass) -+ return min(aprio, bprio); -+ if (aclass > bclass) -+ return bprio; -+ else -+ return aprio; -+} -+ -+#endif -diff -Naur 2.6.12-5.0-org/include/linux/sched.h 2.6.12-5.0-patched/include/linux/sched.h ---- 2.6.12-5.0-org/include/linux/sched.h 2007-07-26 00:57:07.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/sched.h 2007-12-11 12:34:52.000000000 +0100 -@@ -584,6 +584,8 @@ - struct list_head run_list; - prio_array_t *array; - -+ unsigned short ioprio; -+ - unsigned long sleep_avg; - unsigned long long timestamp, last_ran; - unsigned long long sched_time; /* sched_clock time spent running */ -@@ -740,6 +742,7 @@ - nodemask_t mems_allowed; - int cpuset_mems_generation; - #endif -+ atomic_t fs_excl; /* holding fs exclusive resources */ - }; - - static inline pid_t process_group(struct task_struct *tsk) -@@ -1089,7 +1092,8 @@ - - /* - * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring -- * subscriptions and synchronises with wait4(). Also used in procfs. -+ * subscriptions and synchronises with wait4(). Also used in procfs. 
Also -+ * pins the final release of task.io_context. - * - * Nests both inside and outside of read_lock(&tasklist_lock). - * It must not be nested with write_lock_irq(&tasklist_lock), -diff -Naur 2.6.12-5.0-org/include/linux/time.h 2.6.12-5.0-patched/include/linux/time.h ---- 2.6.12-5.0-org/include/linux/time.h 2007-07-26 00:57:01.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/time.h 2007-12-11 12:34:52.000000000 +0100 -@@ -84,6 +84,12 @@ - )*60 + sec; /* finally seconds */ - } - -+/* -+ * Returns true if the timespec is nor, false is denorm: -+ */ -+#define timespec_valid(ts) \ -+ (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) -+ - extern struct timespec xtime; - extern struct timespec wall_to_monotonic; - extern seqlock_t xtime_lock; -diff -Naur 2.6.12-5.0-org/include/linux/writeback.h 2.6.12-5.0-patched/include/linux/writeback.h ---- 2.6.12-5.0-org/include/linux/writeback.h 2007-07-26 00:57:08.000000000 +0200 -+++ 2.6.12-5.0-patched/include/linux/writeback.h 2007-12-11 12:34:52.000000000 +0100 -@@ -14,11 +14,13 @@ - * Yes, writeback.h requires sched.h - * No, sched.h is not included from here. 
- */ --static inline int current_is_pdflush(void) -+static inline int task_is_pdflush(struct task_struct *task) - { -- return current->flags & PF_FLUSHER; -+ return task->flags & PF_FLUSHER; - } - -+#define current_is_pdflush() task_is_pdflush(current) -+ - /* - * fs/fs-writeback.c - */ -diff -Naur 2.6.12-5.0-org/kernel/exit.c 2.6.12-5.0-patched/kernel/exit.c ---- 2.6.12-5.0-org/kernel/exit.c 2007-07-26 00:57:20.000000000 +0200 -+++ 2.6.12-5.0-patched/kernel/exit.c 2007-12-11 12:34:52.000000000 +0100 -@@ -779,6 +779,8 @@ - - profile_task_exit(tsk); - -+ WARN_ON(atomic_read(&tsk->fs_excl)); -+ - if (unlikely(in_interrupt())) - panic("Aiee, killing interrupt handler!"); - if (unlikely(!tsk->pid)) -diff -Naur 2.6.12-5.0-org/kernel/fork.c 2.6.12-5.0-patched/kernel/fork.c ---- 2.6.12-5.0-org/kernel/fork.c 2007-07-26 00:57:20.000000000 +0200 -+++ 2.6.12-5.0-patched/kernel/fork.c 2007-12-11 12:34:52.000000000 +0100 -@@ -1084,6 +1084,11 @@ - spin_unlock(¤t->sighand->siglock); - } - -+ /* -+ * inherit ioprio -+ */ -+ p->ioprio = current->ioprio; -+ - SET_LINKS(p); - if (unlikely(p->ptrace & PT_PTRACED)) - __ptrace_link(p, current->parent); -diff -Naur 2.6.12-5.0-org/kernel/futex.c 2.6.12-5.0-patched/kernel/futex.c ---- 2.6.12-5.0-org/kernel/futex.c 2007-07-26 00:57:20.000000000 +0200 -+++ 2.6.12-5.0-patched/kernel/futex.c 2007-12-11 12:34:52.000000000 +0100 -@@ -40,6 +40,7 @@ - #include <linux/pagemap.h> - #include <linux/syscalls.h> - #include <linux/signal.h> -+#include <asm/futex.h> - - #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) - -@@ -201,22 +202,6 @@ - * for a rare case, so we simply fetch the page. - */ - -- /* -- * Do a quick atomic lookup first - this is the fastpath. 
-- */ -- spin_lock(¤t->mm->page_table_lock); -- page = follow_page(mm, uaddr, 0); -- if (likely(page != NULL)) { -- key->shared.pgoff = -- page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); -- spin_unlock(¤t->mm->page_table_lock); -- return 0; -- } -- spin_unlock(¤t->mm->page_table_lock); -- -- /* -- * Do it the general way. -- */ - err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL); - if (err >= 0) { - key->shared.pgoff = -@@ -327,6 +312,123 @@ - } - - /* -+ * Wake up all waiters hashed on the physical page that is mapped -+ * to this virtual address: -+ */ -+static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op) -+{ -+ union futex_key key1, key2; -+ struct futex_hash_bucket *bh1, *bh2; -+ struct list_head *head; -+ struct futex_q *this, *next; -+ int ret, op_ret, attempt = 0; -+ -+retryfull: -+ down_read(¤t->mm->mmap_sem); -+ -+ ret = get_futex_key(uaddr1, &key1); -+ if (unlikely(ret != 0)) -+ goto out; -+ ret = get_futex_key(uaddr2, &key2); -+ if (unlikely(ret != 0)) -+ goto out; -+ -+ bh1 = hash_futex(&key1); -+ bh2 = hash_futex(&key2); -+ -+retry: -+ if (bh1 < bh2) -+ spin_lock(&bh1->lock); -+ spin_lock(&bh2->lock); -+ if (bh1 > bh2) -+ spin_lock(&bh1->lock); -+ -+ op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2); -+ if (unlikely(op_ret < 0)) { -+ int dummy; -+ -+ spin_unlock(&bh1->lock); -+ if (bh1 != bh2) -+ spin_unlock(&bh2->lock); -+ -+ if (unlikely(op_ret != -EFAULT)) { -+ ret = op_ret; -+ goto out; -+ } -+ -+ /* futex_atomic_op_inuser needs to both read and write -+ * *(int __user *)uaddr2, but we can't modify it -+ * non-atomically. Therefore, if get_user below is not -+ * enough, we need to handle the fault ourselves, while -+ * still holding the mmap_sem. 
*/ -+ if (attempt++) { -+ struct vm_area_struct * vma; -+ struct mm_struct *mm = current->mm; -+ -+ ret = -EFAULT; -+ if (attempt >= 2 || -+ !(vma = find_vma(mm, uaddr2)) || -+ vma->vm_start > uaddr2 || -+ !(vma->vm_flags & VM_WRITE)) -+ goto out; -+ -+ switch (handle_mm_fault(mm, vma, uaddr2, 1)) { -+ case VM_FAULT_MINOR: -+ current->min_flt++; -+ break; -+ case VM_FAULT_MAJOR: -+ current->maj_flt++; -+ break; -+ default: -+ goto out; -+ } -+ goto retry; -+ } -+ -+ /* If we would have faulted, release mmap_sem, -+ * fault it in and start all over again. */ -+ up_read(¤t->mm->mmap_sem); -+ -+ ret = get_user(dummy, (int __user *)uaddr2); -+ if (ret) -+ return ret; -+ -+ goto retryfull; -+ } -+ -+ head = &bh1->chain; -+ -+ list_for_each_entry_safe(this, next, head, list) { -+ if (match_futex (&this->key, &key1)) { -+ wake_futex(this); -+ if (++ret >= nr_wake) -+ break; -+ } -+ } -+ -+ if (op_ret > 0) { -+ head = &bh2->chain; -+ -+ op_ret = 0; -+ list_for_each_entry_safe(this, next, head, list) { -+ if (match_futex (&this->key, &key2)) { -+ wake_futex(this); -+ if (++op_ret >= nr_wake2) -+ break; -+ } -+ } -+ ret += op_ret; -+ } -+ -+ spin_unlock(&bh1->lock); -+ if (bh1 != bh2) -+ spin_unlock(&bh2->lock); -+out: -+ up_read(¤t->mm->mmap_sem); -+ return ret; -+} -+ -+/* - * Requeue all waiters hashed on one physical page to another - * physical page. 
- */ -@@ -740,6 +842,9 @@ - case FUTEX_CMP_REQUEUE: - ret = futex_requeue(uaddr, uaddr2, val, val2, &val3); - break; -+ case FUTEX_WAKE_OP: -+ ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); -+ break; - default: - ret = -ENOSYS; - } -@@ -755,9 +860,11 @@ - unsigned long timeout = MAX_SCHEDULE_TIMEOUT; - int val2 = 0; - -- if ((op == FUTEX_WAIT) && utime) { -+ if (utime && (op == FUTEX_WAIT)) { - if (copy_from_user(&t, utime, sizeof(t)) != 0) - return -EFAULT; -+ if (!timespec_valid(&t)) -+ return -EINVAL; - timeout = timespec_to_jiffies(&t) + 1; - } - /* -diff -Naur 2.6.12-5.0-org/kernel/sched.c 2.6.12-5.0-patched/kernel/sched.c ---- 2.6.12-5.0-org/kernel/sched.c 2007-07-26 00:57:20.000000000 +0200 -+++ 2.6.12-5.0-patched/kernel/sched.c 2007-12-11 12:34:52.000000000 +0100 -@@ -3302,15 +3302,7 @@ - { - return TASK_NICE(p); - } -- --/* -- * The only users of task_nice are binfmt_elf and binfmt_elf32. -- * binfmt_elf is no longer modular, but binfmt_elf32 still is. -- * Therefore, task_nice is needed if there is a compat_mode. -- */ --#ifdef CONFIG_COMPAT - EXPORT_SYMBOL_GPL(task_nice); --#endif - - /** - * idle_cpu - is a given cpu idle currently? -diff -Naur 2.6.12-5.0-org/kernel/signal.c 2.6.12-5.0-patched/kernel/signal.c ---- 2.6.12-5.0-org/kernel/signal.c 2007-07-26 00:57:20.000000000 +0200 -+++ 2.6.12-5.0-patched/kernel/signal.c 2007-12-11 12:34:52.000000000 +0100 -@@ -1971,7 +1971,7 @@ - } - - EXPORT_SYMBOL(recalc_sigpending); --EXPORT_SYMBOL_GPL(dequeue_signal); -+EXPORT_SYMBOL(dequeue_signal); - EXPORT_SYMBOL(flush_signals); - EXPORT_SYMBOL(force_sig); - EXPORT_SYMBOL(kill_pg); |