diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/drivers/block/ll_rw_blk.c linux-2.4-xfs/linux/drivers/block/ll_rw_blk.c --- temp/linux-2.4-xfs/linux.orig/drivers/block/ll_rw_blk.c Fri Nov 24 19:31:51 2000 +++ linux-2.4-xfs/linux/drivers/block/ll_rw_blk.c Fri Nov 24 19:26:38 2000 @@ -1028,7 +1028,7 @@ void generic_make_request (int rw, struct buffer_head * bh, struct kiobuf * kiobuf, kdev_t dev, - unsigned long blocknr, size_t blksize) + unsigned long sect) { int major, minor; unsigned int sector, count; @@ -1040,8 +1040,9 @@ dev = bh->b_rdev; } else { count = kiobuf->length >> 9; - sector = blocknr * (blksize >> 9); + sector = sect; } + major = MAJOR(dev); minor = MINOR(dev); @@ -1187,7 +1188,7 @@ bh->b_rdev = bh->b_dev; bh->b_rsector = bh->b_blocknr * (bh->b_size>>9); - generic_make_request(rw, bh, NULL, 0, 0, 0); + generic_make_request(rw, bh, NULL, 0, 0); } return; @@ -1231,7 +1232,7 @@ * should try ll_rw_block() * for non-SCSI (e.g. IDE) disks. */ - if (!SCSI_DISK_MAJOR(MAJOR(dev))) { + if (!SCSI_DISK_MAJOR(MAJOR(dev)) && MAJOR(dev) != MD_MAJOR) { *error = -ENOSYS; goto end_io; } @@ -1272,7 +1273,8 @@ BUG(); } - generic_make_request(rw, NULL, kiobuf, dev, blocknr, sector); + generic_make_request(rw, NULL, kiobuf, dev, (blocknr * (sector >> 9))); + if (kiobuf->errno != 0) { *error = kiobuf->errno; goto end_io; diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/drivers/md/md.c linux-2.4-xfs/linux/drivers/md/md.c --- temp/linux-2.4-xfs/linux.orig/drivers/md/md.c Fri Nov 24 19:32:11 2000 +++ linux-2.4-xfs/linux/drivers/md/md.c Fri Nov 24 19:56:27 2000 @@ -170,12 +170,13 @@ mddev_map[minor].data = NULL; } -static int md_make_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int md_make_request (request_queue_t *q, int rw, struct buffer_head * bh, + struct kiobuf *kiobuf, kdev_t dev, unsigned int sector, unsigned int count) { - mddev_t *mddev = kdev_to_mddev(bh->b_rdev); + mddev_t *mddev = 
kdev_to_mddev(dev); if (mddev && mddev->pers) - return mddev->pers->make_request(mddev, rw, bh); + return mddev->pers->make_request(mddev, rw, bh, kiobuf, dev, sector, count); else { buffer_IO_error(bh); return -1; diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/drivers/md/raid1.c linux-2.4-xfs/linux/drivers/md/raid1.c --- temp/linux-2.4-xfs/linux.orig/drivers/md/raid1.c Fri Nov 24 19:32:11 2000 +++ linux-2.4-xfs/linux/drivers/md/raid1.c Fri Nov 24 19:50:46 2000 @@ -12,6 +12,8 @@ * Fixes to reconstruction by Jakob Østergaard" * Various fixes by Neil Brown * + * kiobuf based IO support by Marcelo Tosatti , 2000 + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) @@ -47,6 +49,8 @@ #endif +extern kmem_cache_t *kiobuf_cachep; + static mdk_personality_t raid1_personality; static md_spinlock_t retry_list_lock = MD_SPIN_LOCK_UNLOCKED; struct raid1_bh *raid1_retry_list = NULL, **raid1_retry_tail; @@ -377,6 +381,60 @@ bh->b_end_io(bh, uptodate); raid1_free_r1bh(r1_bh); } + +void raid1_end_kio (struct kiobuf *kiobuf) +{ + struct raid1_bh * r1_bh = (struct raid1_bh *)(kiobuf->k_dev_id); + int i; + raid1_conf_t *conf; + + if(kiobuf->errno != 0) + md_error (mddev_to_kdev(r1_bh->mddev), r1_bh->kiodev); + else + set_bit (R1BH_Uptodate, &r1_bh->state); + + if ((r1_bh->cmd == READ) || (r1_bh->cmd == READA)) { + + conf = mddev_to_conf(r1_bh->mddev); + + io_request_done(r1_bh->sector, conf, + test_bit(R1BH_SyncPhase, &r1_bh->state)); + + r1_bh->kiobuf->end_io(r1_bh->kiobuf); + + kmem_cache_free(kiobuf_cachep, r1_bh->kiovec[0]); + kfree(r1_bh->kiovec); + raid1_free_r1bh(r1_bh); + + return; + } + + /* If we are the last RAID write request being done, call the kiobuf + * completion I/O routine. 
+ */ + + if (atomic_dec_and_test(&r1_bh->remaining)) { + + conf = mddev_to_conf(r1_bh->mddev); + + io_request_done(r1_bh->sector, conf, + test_bit(R1BH_SyncPhase, &r1_bh->state)); + + r1_bh->kiobuf->end_io(r1_bh->kiobuf); + + for(i=0; i < conf->raid_disks; i++) + kmem_cache_free(kiobuf_cachep, r1_bh->kiovec[i]); + + kfree(r1_bh->kiovec); + raid1_free_r1bh(r1_bh); + + return; + } + + return; + +} + void raid1_end_request (struct buffer_head *bh, int uptodate) { struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_private); @@ -444,11 +502,10 @@ * reads should be somehow balanced. */ -static int raid1_read_balance (raid1_conf_t *conf, struct buffer_head *bh) +static int raid1_read_balance (raid1_conf_t *conf, kdev_t dev, + unsigned int sector, unsigned int count) { int new_disk = conf->last_used; - const int sectors = bh->b_size >> 9; - const unsigned long this_sector = bh->b_rsector; int disk = new_disk; unsigned long new_distance; unsigned long current_distance; @@ -486,7 +543,7 @@ * Don't touch anything for sequential reads. 
*/ - if (this_sector == conf->mirrors[new_disk].head_position) + if (sector == conf->mirrors[new_disk].head_position) goto rb_out; /* @@ -511,7 +568,7 @@ goto rb_out; } - current_distance = abs(this_sector - + current_distance = abs(sector - conf->mirrors[disk].head_position); /* Find the disk which is closest */ @@ -523,7 +580,7 @@ (!conf->mirrors[disk].operational)) continue; - new_distance = abs(this_sector - + new_distance = abs(sector - conf->mirrors[disk].head_position); if (new_distance < current_distance) { @@ -534,78 +591,44 @@ } rb_out: - conf->mirrors[new_disk].head_position = this_sector + sectors; + conf->mirrors[new_disk].head_position = sector + count; conf->last_used = new_disk; - conf->sect_count += sectors; + conf->sect_count += count; return new_disk; } -static int raid1_make_request (mddev_t *mddev, int rw, - struct buffer_head * bh) + +static int bh_raid1_make_request(mddev_t *mddev, int rw, struct buffer_head *bh, + kdev_t dev, unsigned int sector, unsigned int count, + struct raid1_bh *r1_bh) { raid1_conf_t *conf = mddev_to_conf(mddev); - struct buffer_head *bh_req, *bhl; - struct raid1_bh * r1_bh; int disks = MD_SB_DISKS; - int i, sum_bhs = 0, sectors; + int i, sum_bhs = 0; + struct buffer_head *bh_req, *bhl; struct mirror_info *mirror; if (!buffer_locked(bh)) BUG(); - -/* - * make_request() can abort the operation when READA is being - * used and no empty request is available. - * - * Currently, just replace the command with READ/WRITE. 
- */ - if (rw == READA) - rw = READ; - - r1_bh = raid1_alloc_r1bh (conf); - - spin_lock_irq(&conf->segment_lock); - wait_event_lock_irq(conf->wait_done, - bh->b_rsector < conf->start_active || - bh->b_rsector >= conf->start_future, - conf->segment_lock); - if (bh->b_rsector < conf->start_active) - conf->cnt_done++; - else { - conf->cnt_future++; - if (conf->phase) - set_bit(R1BH_SyncPhase, &r1_bh->state); - } - spin_unlock_irq(&conf->segment_lock); - - /* - * i think the read and write branch should be separated completely, - * since we want to do read balancing on the read side for example. - * Alternative implementations? :) --mingo - */ r1_bh->master_bh = bh; - r1_bh->mddev = mddev; - r1_bh->cmd = rw; - sectors = bh->b_size >> 9; if (rw == READ) { - /* - * read balancing logic: - */ - mirror = conf->mirrors + raid1_read_balance(conf, bh); - + /* + * read balancing logic: + */ + mirror = conf->mirrors + raid1_read_balance(conf, dev, sector, count); bh_req = &r1_bh->bh_req; memcpy(bh_req, bh, sizeof(*bh)); - bh_req->b_blocknr = bh->b_rsector * sectors; + bh_req->b_blocknr = sector / count; bh_req->b_dev = mirror->dev; bh_req->b_rdev = mirror->dev; /* bh_req->b_rsector = bh->n_rsector; */ bh_req->b_end_io = raid1_end_request; bh_req->b_private = r1_bh; - generic_make_request (rw, bh_req, NULL, 0, 0, 0); + generic_make_request (rw, bh_req, NULL, 0, 0); return 0; } @@ -614,8 +637,10 @@ */ bhl = raid1_alloc_bh(conf, conf->raid_disks); + for (i = 0; i < disks; i++) { struct buffer_head *mbh; + if (!conf->mirrors[i].operational) continue; @@ -639,20 +664,20 @@ bhl = mbh->b_next; mbh->b_next = NULL; mbh->b_this_page = (struct buffer_head *)1; - + /* * prepare mirrored mbh (fields ordered for max mem throughput): */ - mbh->b_blocknr = bh->b_rsector * sectors; + mbh->b_blocknr = sector / count; mbh->b_dev = conf->mirrors[i].dev; mbh->b_rdev = conf->mirrors[i].dev; - mbh->b_rsector = bh->b_rsector; + mbh->b_rsector = sector; mbh->b_state = (1<b_count, 1); mbh->b_size = 
bh->b_size; - mbh->b_page = bh->b_page; + mbh->b_page = bh->b_page; mbh->b_data = bh->b_data; mbh->b_list = BUF_LOCKED; mbh->b_end_io = raid1_end_request; @@ -660,9 +685,14 @@ mbh->b_next = r1_bh->mirror_bh_list; r1_bh->mirror_bh_list = mbh; + + sum_bhs++; } + if (bhl) raid1_free_bh(conf,bhl); + + md_atomic_set(&r1_bh->remaining, sum_bhs); /* @@ -680,11 +710,177 @@ while(bh) { struct buffer_head *bh2 = bh; bh = bh->b_next; - generic_make_request(rw, bh2, NULL, 0, 0, 0); + generic_make_request(rw, bh2, NULL, 0, 0); } + return (0); } +/* + * kiobuf based make request function + */ + +static int kio_raid1_make_request(mddev_t *mddev, int rw, struct kiobuf *kiobuf, + kdev_t dev, unsigned int sector, unsigned count, + struct raid1_bh *r1_bh) +{ + raid1_conf_t *conf = mddev_to_conf(mddev); + int disks = MD_SB_DISKS; + int i, sum_bhs = 0, cnt = 0; + struct mirror_info *mirror; + + r1_bh->kiobuf = kiobuf; + r1_bh->kiodev = dev; + r1_bh->sector = sector; + + if (rw == READ) { + /* + * read balancing logic: + */ + struct kiobuf *rkio; + mirror = conf->mirrors + raid1_read_balance(conf, dev, sector, count); + + r1_bh->kiovec = (struct kiobuf **)kmalloc(sizeof(struct kiobuf *), GFP_BUFFER); + alloc_kiovec(1, r1_bh->kiovec); + + rkio = r1_bh->kiovec[0]; + + r1_bh->sector = sector; + + rkio->nr_pages = kiobuf->nr_pages; + rkio->array_len = kiobuf->array_len; + rkio->offset = kiobuf->offset; + rkio->length = kiobuf->length; + + rkio->maplist = kiobuf->maplist; + rkio->orig_maplist = kiobuf->orig_maplist; + rkio->locked = kiobuf->locked; + rkio->bounced = kiobuf->bounced; + + memcpy(rkio->map_array, kiobuf->map_array, sizeof(rkio->map_array)); + + memcpy(rkio->orig_map_array, kiobuf->orig_map_array, sizeof(rkio->orig_map_array)); + + rkio->end_io = raid1_end_kio; + rkio->k_dev_id = r1_bh; + + rkio->errno = kiobuf->errno; + + generic_make_request (rw, NULL, rkio, mirror->dev, sector); + + return 0; + } + + + /* + * WRITE: + */ + + r1_bh->kiovec = (struct kiobuf **)kmalloc(sizeof(struct kiobuf *) + * 
conf->raid_disks, GFP_BUFFER); + + alloc_kiovec(conf->raid_disks, r1_bh->kiovec); + + + for (i = 0; i < disks; i++) { + struct kiobuf *wkio; + + if (!conf->mirrors[i].operational) + continue; + + wkio = r1_bh->kiovec[sum_bhs]; + + wkio->nr_pages = kiobuf->nr_pages; + wkio->array_len = kiobuf->array_len; + wkio->offset = kiobuf->offset; + wkio->length = kiobuf->length; + + wkio->maplist = kiobuf->maplist; + wkio->orig_maplist = kiobuf->orig_maplist; + wkio->locked = kiobuf->locked; + wkio->bounced = kiobuf->bounced; + memcpy(wkio->map_array, kiobuf->map_array, sizeof(wkio->map_array)); + memcpy(wkio->orig_map_array, kiobuf->orig_map_array, sizeof(wkio->orig_map_array)); + wkio->end_io = raid1_end_kio; + wkio->k_dev_id = r1_bh; + wkio->errno = kiobuf->errno; + + sum_bhs++; + + } + + md_atomic_set(&r1_bh->remaining, sum_bhs); + + /* Now submit IO to all working mirrors */ + + for(i=0; i < disks ; i++) { + struct kiobuf *kio = r1_bh->kiovec[cnt]; + kdev_t devi = conf->mirrors[i].dev; + + if (!conf->mirrors[i].operational) + continue; + + cnt++; + + generic_make_request(rw, NULL, kio, devi, sector); + + if(--sum_bhs < 0) BUG(); + } + + return (0); +} + + +static int raid1_make_request (mddev_t *mddev, int rw, struct buffer_head * bh, + struct kiobuf *kiobuf, kdev_t dev, unsigned int sector, + unsigned long count) +{ + raid1_conf_t *conf = mddev_to_conf(mddev); + struct raid1_bh * r1_bh; + +/* + * make_request() can abort the operation when READA is being + * used and no empty request is available. + * + * Currently, just replace the command with READ/WRITE. 
+ */ + if (rw == READA) + rw = READ; + + r1_bh = raid1_alloc_r1bh (conf); + + spin_lock_irq(&conf->segment_lock); + wait_event_lock_irq(conf->wait_done, + sector < conf->start_active || + sector >= conf->start_future, + conf->segment_lock); + if (sector < conf->start_active) + conf->cnt_done++; + else { + conf->cnt_future++; + if (conf->phase) + set_bit(R1BH_SyncPhase, &r1_bh->state); + } + spin_unlock_irq(&conf->segment_lock); + + /* + * i think the read and write branch should be separated completely, + * since we want to do read balancing on the read side for example. + * Alternative implementations? :) --mingo + */ + + r1_bh->mddev = mddev; + r1_bh->cmd = rw; + + if(bh) + return bh_raid1_make_request(mddev, rw, bh, dev, sector, count, r1_bh); + else if (kiobuf) + return kio_raid1_make_request(mddev, rw, kiobuf, dev, sector, count, r1_bh); + printk(KERN_ERR "raid1_make_request: neither kio and bh IO!\n"); + BUG(); + return (1); +} + static int raid1_status (char *page, mddev_t *mddev) { raid1_conf_t *conf = mddev_to_conf(mddev); @@ -1180,7 +1376,7 @@ while (mbh) { struct buffer_head *bh1 = mbh; mbh = mbh->b_next; - generic_make_request(WRITE, bh1, NULL, 0, 0, 0); + generic_make_request(WRITE, bh1, NULL, 0, 0); md_sync_acct(bh1->b_rdev, bh1->b_size/512); } } else { @@ -1193,7 +1389,7 @@ printk (REDIRECT_SECTOR, partition_name(bh->b_dev), bh->b_blocknr); bh->b_rdev = bh->b_dev; - generic_make_request(READ, bh, NULL, 0, 0, 0); + generic_make_request(READ, bh, NULL, 0, 0); } } @@ -1210,7 +1406,7 @@ printk (REDIRECT_SECTOR, partition_name(bh->b_dev), bh->b_blocknr); bh->b_rdev = bh->b_dev; - generic_make_request (r1_bh->cmd, bh, NULL, 0, 0, 0); + generic_make_request (r1_bh->cmd, bh, NULL, 0, 0); } break; } @@ -1254,7 +1450,7 @@ conf->start_future = mddev->sb->size+1; conf->cnt_pending = conf->cnt_future; conf->cnt_future = 0; - conf->phase = conf->phase ^1; + conf->phase = conf->phase ^1; wait_event_lock_irq(conf->wait_ready, !conf->cnt_pending, conf->segment_lock); 
conf->start_active = conf->start_ready = conf->start_pending = conf->start_future = 0; conf->phase = 0; @@ -1405,7 +1601,7 @@ bh->b_rsector = block_nr<<1; init_waitqueue_head(&bh->b_wait); - generic_make_request(READ, bh, NULL, 0, 0, 0); + generic_make_request(READ, bh, NULL, 0, 0); md_sync_acct(bh->b_rdev, bh->b_size/512); return (bsize >> 10); diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/drivers/md/raid5.c linux-2.4-xfs/linux/drivers/md/raid5.c --- temp/linux-2.4-xfs/linux.orig/drivers/md/raid5.c Fri Nov 24 19:32:11 2000 +++ linux-2.4-xfs/linux/drivers/md/raid5.c Fri Nov 24 18:45:22 2000 @@ -1069,11 +1069,11 @@ PRINTK("writing spare %d\n", i); atomic_inc(&sh->nr_pending); bh->b_dev = bh->b_rdev = conf->spare->dev; - generic_make_request(WRITE, bh, NULL, 0, 0, 0); + generic_make_request(WRITE, bh, NULL, 0, 0); } else { atomic_inc(&sh->nr_pending); bh->b_dev = bh->b_rdev = conf->disks[i].dev; - generic_make_request(WRITE, bh, NULL, 0, 0, 0); + generic_make_request(WRITE, bh, NULL, 0, 0); } atomic_dec(&bh->b_count); } @@ -1112,7 +1112,7 @@ lock_get_bh(sh->bh_old[i]); atomic_inc(&sh->nr_pending); sh->bh_old[i]->b_dev = sh->bh_old[i]->b_rdev = conf->disks[i].dev; - generic_make_request(READ, sh->bh_old[i], NULL, 0, 0, 0); + generic_make_request(READ, sh->bh_old[i], NULL, 0, 0); atomic_dec(&sh->bh_old[i]->b_count); } PRINTK("handle_stripe() %lu, reading %d old buffers\n", sh->sector, md_atomic_read(&sh->nr_pending)); @@ -1157,7 +1157,7 @@ lock_get_bh(sh->bh_old[i]); atomic_inc(&sh->nr_pending); sh->bh_old[i]->b_dev = sh->bh_old[i]->b_rdev = conf->disks[i].dev; - generic_make_request(READ, sh->bh_old[i], NULL, 0, 0, 0); + generic_make_request(READ, sh->bh_old[i], NULL, 0, 0); atomic_dec(&sh->bh_old[i]->b_count); } PRINTK("handle_stripe() %lu, phase READ_OLD, pending %d buffers\n", sh->sector, md_atomic_read(&sh->nr_pending)); @@ -1186,7 +1186,7 @@ lock_get_bh(sh->bh_req[i]); atomic_inc(&sh->nr_pending); sh->bh_req[i]->b_dev = 
sh->bh_req[i]->b_rdev = conf->disks[i].dev; - generic_make_request(READ, sh->bh_req[i], NULL, 0, 0, 0); + generic_make_request(READ, sh->bh_req[i], NULL, 0, 0); atomic_dec(&sh->bh_req[i]->b_count); } PRINTK("handle_stripe() %lu, phase READ, pending %d\n", sh->sector, md_atomic_read(&sh->nr_pending)); @@ -1222,7 +1222,7 @@ lock_get_bh(bh); atomic_inc(&sh->nr_pending); bh->b_dev = bh->b_rdev = conf->disks[i].dev; - generic_make_request(READ, bh, NULL, 0, 0, 0); + generic_make_request(READ, bh, NULL, 0, 0); md_sync_acct(bh->b_rdev, bh->b_size/512); atomic_dec(&sh->bh_old[i]->b_count); } @@ -1251,7 +1251,7 @@ atomic_inc(&sh->nr_pending); lock_get_bh(bh); bh->b_dev = bh->b_rdev = conf->spare->dev; - generic_make_request(WRITE, bh, NULL, 0, 0, 0); + generic_make_request(WRITE, bh, NULL, 0, 0); md_sync_acct(bh->b_rdev, bh->b_size/512); atomic_dec(&bh->b_count); PRINTK("handle_stripe_sync() %lu, phase WRITE, pending %d buffers\n", sh->sector, md_atomic_read(&sh->nr_pending)); @@ -1277,7 +1277,7 @@ lock_get_bh(bh); atomic_inc(&sh->nr_pending); bh->b_dev = bh->b_rdev = conf->disks[pd_idx].dev; - generic_make_request(WRITE, bh, NULL, 0, 0, 0); + generic_make_request(WRITE, bh, NULL, 0, 0); md_sync_acct(bh->b_rdev, bh->b_size/512); atomic_dec(&bh->b_count); PRINTK("handle_stripe_sync() %lu phase WRITE, pending %d buffers\n", diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/fs/buffer.c linux-2.4-xfs/linux/fs/buffer.c --- temp/linux-2.4-xfs/linux.orig/fs/buffer.c Fri Nov 24 19:32:42 2000 +++ linux-2.4-xfs/linux/fs/buffer.c Fri Nov 24 19:03:58 2000 @@ -2057,7 +2057,7 @@ atomic_inc(&iobuf->io_count); - generic_make_request(rw, tmp, NULL, 0, 0, 0); + generic_make_request(rw, tmp, NULL, 0, 0); /* * Wait for IO if we have got too much */ diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/fs/iobuf.c linux-2.4-xfs/linux/fs/iobuf.c --- temp/linux-2.4-xfs/linux.orig/fs/iobuf.c Fri Nov 24 19:32:42 2000 +++ 
linux-2.4-xfs/linux/fs/iobuf.c Sun Nov 19 15:01:29 2000 @@ -11,8 +11,7 @@ #include #include -static kmem_cache_t *kiobuf_cachep; - +kmem_cache_t *kiobuf_cachep; void end_kio_request(struct kiobuf *kiobuf, int uptodate) { diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/include/linux/blkdev.h linux-2.4-xfs/linux/include/linux/blkdev.h --- temp/linux-2.4-xfs/linux.orig/include/linux/blkdev.h Fri Nov 24 19:32:59 2000 +++ linux-2.4-xfs/linux/include/linux/blkdev.h Fri Nov 24 19:51:09 2000 @@ -157,7 +157,7 @@ extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size); extern void generic_make_request(int rw, struct buffer_head * bh, struct kiobuf * kiobuf, kdev_t dev, - unsigned long blkocknr, size_t blksize); + unsigned long sect); extern request_queue_t *blk_get_queue(kdev_t dev); extern void blkdev_release_request(struct request *); diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/include/linux/raid/md_k.h linux-2.4-xfs/linux/include/linux/raid/md_k.h --- temp/linux-2.4-xfs/linux.orig/include/linux/raid/md_k.h Fri Nov 24 19:33:01 2000 +++ linux-2.4-xfs/linux/include/linux/raid/md_k.h Fri Nov 24 19:41:06 2000 @@ -217,7 +217,8 @@ struct mdk_personality_s { char *name; - int (*make_request)(mddev_t *mddev, int rw, struct buffer_head * bh); + int (*make_request)(mddev_t *mddev, int rw, struct buffer_head * bh, + struct kiobuf *kiobuf, kdev_t dev, unsigned int sector, unsigned int count); int (*run)(mddev_t *mddev); int (*stop)(mddev_t *mddev); int (*status)(char *page, mddev_t *mddev); diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/include/linux/raid/raid1.h linux-2.4-xfs/linux/include/linux/raid/raid1.h --- temp/linux-2.4-xfs/linux.orig/include/linux/raid/raid1.h Fri Nov 24 19:33:01 2000 +++ linux-2.4-xfs/linux/include/linux/raid/raid1.h Fri Nov 24 19:53:13 2000 @@ -84,6 +84,12 @@ struct buffer_head *mirror_bh_list; 
struct buffer_head bh_req; struct raid1_bh *next_r1; /* next for retry or in free list */ + + /* this information is only used by kiobuf based IO */ + struct kiobuf *kiobuf; + struct kiobuf **kiovec; + kdev_t kiodev; + unsigned long sector; }; /* bits for raid1_bh.state */ #define R1BH_Uptodate 1 diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/include/linux/slab.h linux-2.4-xfs/linux/include/linux/slab.h --- temp/linux-2.4-xfs/linux.orig/include/linux/slab.h Fri Nov 24 19:33:00 2000 +++ linux-2.4-xfs/linux/include/linux/slab.h Sun Nov 19 15:01:17 2000 @@ -75,6 +75,7 @@ extern kmem_cache_t *bh_cachep; extern kmem_cache_t *fs_cachep; extern kmem_cache_t *sigact_cachep; +extern kmem_cache_t *kiobuf_cachep; #endif /* __KERNEL__ */ diff -Nur --exclude-from=/home/marcelo/exclude temp/linux-2.4-xfs/linux.orig/kernel/ksyms.c linux-2.4-xfs/linux/kernel/ksyms.c --- temp/linux-2.4-xfs/linux.orig/kernel/ksyms.c Fri Nov 24 19:33:03 2000 +++ linux-2.4-xfs/linux/kernel/ksyms.c Fri Nov 24 18:20:16 2000 @@ -423,6 +423,8 @@ /* Kiobufs */ EXPORT_SYMBOL(kiobuf_init); +EXPORT_SYMBOL(kiobuf_cachep); + EXPORT_SYMBOL(alloc_kiovec); EXPORT_SYMBOL(free_kiovec); EXPORT_SYMBOL(expand_kiobuf); --- temp/linux-2.4-xfs/linux/fs/pagebuf/page_buf.c Fri Nov 10 17:20:04 2000 +++ linux-2.4-xfs/linux/fs/pagebuf/page_buf.c Fri Nov 24 18:49:50 2000 @@ -1437,7 +1437,7 @@ atomic_inc(&PBP(pb)->pb_io_remaining); for (itr=0; itr < cnt; itr++){ - generic_make_request(rw, psync->bh[itr], NULL, 0, 0, 0); + generic_make_request(rw, psync->bh[itr], NULL, 0, 0); } } else { kfree(psync);