
File: linux-2.6-xfs / split-patches / Attic / jens-blk-trace

Revision 1.2, Mon Aug 29 15:50:38 2005 UTC by nathans.longdrop.melbourne.sgi.com
Branch: MAIN
Changes since 1.1: +41 -41 lines

Merge up to 2.6.13.
Merge of 2.6.x-xfs-melb:linux:23660a by kenmcd.

%patch
Index: 2.6.x-xfs/drivers/block/Kconfig
===================================================================
--- 2.6.x-xfs.orig/drivers/block/Kconfig	2005-06-20 16:55:30.399670611 +1000
+++ 2.6.x-xfs/drivers/block/Kconfig	2005-08-29 13:30:18.888381015 +1000
@@ -461,6 +461,14 @@
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB.  Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	select RELAYFS
+	help
+	  Say Y here if you want to be able to trace the block layer actions
+	  on a given queue.
+
+
 config CDROM_PKTCDVD
 	tristate "Packet writing on CD/DVD media"
 	depends on !UML
Index: 2.6.x-xfs/drivers/block/Makefile
===================================================================
--- 2.6.x-xfs.orig/drivers/block/Makefile	2005-01-05 14:36:26.000000000 +1100
+++ 2.6.x-xfs/drivers/block/Makefile	2005-08-29 13:30:18.889357443 +1000
@@ -45,3 +45,5 @@
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
+
Index: 2.6.x-xfs/drivers/block/blktrace.c
===================================================================
--- 2.6.x-xfs.orig/drivers/block/blktrace.c	2005-05-25 01:08:16.000000000 +1000
+++ 2.6.x-xfs/drivers/block/blktrace.c	2005-08-29 13:30:18.890333871 +1000
@@ -0,0 +1,124 @@
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace.h>
+#include <asm/uaccess.h>
+
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, char *pdu_data)
+{
+	struct blk_io_trace t;
+	unsigned long flags;
+
+	if (rw & (1 << BIO_RW_BARRIER))
+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
+	if (rw & (1 << BIO_RW_SYNC))
+		what |= BLK_TC_ACT(BLK_TC_SYNC);
+
+	if (rw & WRITE)
+		what |= BLK_TC_ACT(BLK_TC_WRITE);
+	else
+		what |= BLK_TC_ACT(BLK_TC_READ);
+		
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+		return;
+
+	t.magic		= BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t.sequence	= atomic_add_return(1, &bt->sequence);
+	t.time		= sched_clock();
+	t.sector	= sector;
+	t.bytes		= bytes;
+	t.action	= what;
+	t.pid		= current->pid;
+	t.error		= error;
+	t.pdu_len	= pdu_len;
+
+	local_irq_save(flags);
+	__relay_write(bt->rchan, &t, sizeof(t));
+	if (pdu_len)
+		__relay_write(bt->rchan, pdu_data, pdu_len);
+	local_irq_restore(flags);
+}
+
+int blk_stop_trace(struct block_device *bdev)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_trace *bt = NULL;
+	int ret = -EINVAL;
+
+	if (!q)
+		return -ENXIO;
+
+	down(&bdev->bd_sem);
+
+	spin_lock_irq(q->queue_lock);
+	if (q->blk_trace) {
+		bt = q->blk_trace;
+		q->blk_trace = NULL;
+		ret = 0;
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	up(&bdev->bd_sem);
+
+	if (bt) {
+		relay_close(bt->rchan);
+		kfree(bt);
+	}
+
+	return ret;
+}
+
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_user_trace_setup buts;
+	struct blk_trace *bt;
+	char b[BDEVNAME_SIZE];
+	int ret = 0;
+
+	if (!q)
+		return -ENXIO;
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	down(&bdev->bd_sem);
+	ret = -EBUSY;
+	if (q->blk_trace)
+		goto err;
+
+	ret = -ENOMEM;
+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+	if (!bt)
+		goto err;
+
+	atomic_set(&bt->sequence, 0);
+
+	bt->rchan = relay_open(bdevname(bdev, b), NULL, buts.buf_size,
+				buts.buf_nr, NULL);
+	ret = -EIO;
+	if (!bt->rchan)
+		goto err;
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	spin_lock_irq(q->queue_lock);
+	q->blk_trace = bt;
+	spin_unlock_irq(q->queue_lock);
+	ret = 0;
+err:
+	up(&bdev->bd_sem);
+	return ret;
+}
+
Index: 2.6.x-xfs/drivers/block/elevator.c
===================================================================
--- 2.6.x-xfs.orig/drivers/block/elevator.c	2005-08-29 13:13:43.097701535 +1000
+++ 2.6.x-xfs/drivers/block/elevator.c	2005-08-29 13:30:18.891310299 +1000
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
+#include <linux/blktrace.h>
 
 #include <asm/uaccess.h>
 
@@ -371,6 +372,9 @@
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
+
+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
 		/*
 		 * just mark as started even if we don't start it, a request
 		 * that has been delayed should not be passed by new incoming
Index: 2.6.x-xfs/drivers/block/ioctl.c
===================================================================
--- 2.6.x-xfs.orig/drivers/block/ioctl.c	2005-08-29 13:13:43.099654391 +1000
+++ 2.6.x-xfs/drivers/block/ioctl.c	2005-08-29 13:31:11.336241153 +1000
@@ -4,6 +4,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -188,6 +189,10 @@
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 	case BLKGETSIZE64:
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKSTARTTRACE:
+		return blk_start_trace(bdev, (char __user *) arg);
+	case BLKSTOPTRACE:
+		return blk_stop_trace(bdev);
 	}
 	return -ENOIOCTLCMD;
 }
Index: 2.6.x-xfs/drivers/block/ll_rw_blk.c
===================================================================
--- 2.6.x-xfs.orig/drivers/block/ll_rw_blk.c	2005-08-29 13:13:43.103560103 +1000
+++ 2.6.x-xfs/drivers/block/ll_rw_blk.c	2005-08-29 14:09:26.343970362 +1000
@@ -29,6 +29,7 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/blktrace.h>
 
 /*
  * for max sense size
@@ -1623,6 +1624,12 @@
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace) {
+		relay_close(q->blk_trace->rchan);
+		kfree(q->blk_trace);
+		q->blk_trace = NULL;
+	}
+
 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
 
 	kmem_cache_free(requestq_cachep, q);
@@ -1969,6 +1976,8 @@
 	
 	rq_init(q, rq);
 	rq->rl = rl;
+
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
 }
@@ -1997,6 +2006,8 @@
 		if (!rq) {
 			struct io_context *ioc;
 
+			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
 			__generic_unplug_device(q);
 			spin_unlock_irq(q->queue_lock);
 			io_schedule();
@@ -2050,6 +2061,8 @@
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
@@ -2580,6 +2593,8 @@
 			if (!q->back_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2595,6 +2610,8 @@
 			if (!q->front_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
 			req->bio = bio;
 
@@ -2620,6 +2637,8 @@
 	}
 
 get_rq:
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
 	/*
 	 * Grab a free request. This might sleep but cannot fail.
 	 * Returns with the queue unlocked.
@@ -2896,6 +2915,10 @@
 		blk_partition_remap(bio);
 
 		ret = q->make_request_fn(q, bio);
+
+		if (ret)
+			blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
 	} while (ret);
 }
 
@@ -3014,6 +3037,8 @@
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 	struct bio *bio;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	/*
 	 * extend uptodate bool to allow < 0 value to be direct io error
 	 */
Index: 2.6.x-xfs/include/linux/blkdev.h
===================================================================
--- 2.6.x-xfs.orig/include/linux/blkdev.h	2005-08-29 13:13:45.891261758 +1000
+++ 2.6.x-xfs/include/linux/blkdev.h	2005-08-29 13:30:18.903027437 +1000
@@ -22,6 +22,7 @@
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -412,6 +413,8 @@
 	 */
 	struct request		*flush_rq;
 	unsigned char		ordered;
+
+	struct blk_trace	*blk_trace;
 };
 
 enum {
Index: 2.6.x-xfs/include/linux/blktrace.h
===================================================================
--- 2.6.x-xfs.orig/include/linux/blktrace.h	2005-05-25 01:08:16.000000000 +1000
+++ 2.6.x-xfs/include/linux/blktrace.h	2005-08-29 13:30:18.919626715 +1000
@@ -0,0 +1,145 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+
+/*
+ * Trace categories
+ */
+enum {
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+
+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum {
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define	BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define	BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define	BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x02
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds (sched_clock()) */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+};
+
+struct blk_trace {
+	struct rchan *rchan;
+	atomic_t sequence;
+	u16 act_mask;
+};
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *);
+
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+				    u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+	}
+}
+
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+				     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		blk_add_trace_bio(q, bio, what);
+	else
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)		(-EINVAL)
+#define blk_stop_trace(bdev)			(-EINVAL)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, bio, what)		do { } while (0)
+#define blk_add_trace_generic(q, bio, rw, what)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif
Index: 2.6.x-xfs/include/linux/fs.h
===================================================================
--- 2.6.x-xfs.orig/include/linux/fs.h	2005-08-29 13:28:11.627571456 +1000
+++ 2.6.x-xfs/include/linux/fs.h	2005-08-29 13:30:18.922555999 +1000
@@ -194,6 +194,8 @@
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */

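Usage sketch (editorial note, not part of the patch): user space arms and disarms tracing through the two new ioctls. The program below mirrors struct blk_user_trace_setup and the BLKSTARTTRACE/BLKSTOPTRACE numbers from the patch rather than including kernel headers; the mirrored layout and the 64k/4 buffer sizing are illustrative assumptions. Reading the resulting event stream from the relayfs channel (whose base name the kernel copies back into buts.name) is left out.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* mirrors the definitions the patch adds to blktrace.h and fs.h */
#define BDEVNAME_SIZE	32

struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output: relay channel base name */
	uint16_t act_mask;		/* input: 0 means "trace everything" */
	uint32_t buf_size;		/* input: relay sub-buffer size */
	uint32_t buf_nr;		/* input: number of sub-buffers */
};

#define BLKSTARTTRACE	_IOWR(0x12, 115, struct blk_user_trace_setup)
#define BLKSTOPTRACE	_IO(0x12, 116)

int main(int argc, char *argv[])
{
	struct blk_user_trace_setup buts;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&buts, 0, sizeof(buts));
	/* act_mask left 0: blk_start_trace() expands it to all categories */
	buts.buf_size = 64 * 1024;	/* passed through to relay_open() */
	buts.buf_nr = 4;

	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
		perror("BLKSTARTTRACE");
		close(fd);
		return 1;
	}

	/* the kernel copied the bdevname-based channel name back to us */
	printf("tracing %s, relay channel: %s\n", argv[1], buts.name);

	sleep(5);	/* a real consumer would read relayfs buffers here */

	if (ioctl(fd, BLKSTOPTRACE) < 0)
		perror("BLKSTOPTRACE");

	close(fd);
	return 0;
}

Leaving act_mask at zero works because blk_start_trace() widens a zero mask to (u16) -1, i.e. all trace categories.
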
%diffstat
 drivers/block/Kconfig     |    8 ++
 drivers/block/Makefile    |    2 
 drivers/block/blktrace.c  |  124 +++++++++++++++++++++++++++++++++++++++
 drivers/block/elevator.c  |    4 +
 drivers/block/ioctl.c     |    5 +
 drivers/block/ll_rw_blk.c |   25 +++++++
 include/linux/blkdev.h    |    3 
 include/linux/blktrace.h  |  145 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/fs.h        |    2 
 9 files changed, 318 insertions(+)
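
Decoding sketch (editorial note, not part of the patch): the channel carries fixed-size 40-byte struct blk_io_trace records, each optionally followed by pdu_len bytes of payload (for PC requests, the rq->cmd block). A minimal decoder, assuming the relay buffer was saved to a file and is read on the machine that produced it (same endianness and struct padding):

#include <stdio.h>
#include <stdint.h>

/* mirrors struct blk_io_trace from the patch above */
struct blk_io_trace {
	uint32_t magic;		/* BLK_IO_TRACE_MAGIC | version */
	uint32_t sequence;	/* event number */
	uint64_t time;		/* sched_clock() timestamp */
	uint64_t sector;	/* disk offset */
	uint32_t bytes;		/* transfer length */
	uint32_t action;	/* category bits | action code */
	uint32_t pid;		/* who did it */
	uint16_t error;		/* completion error */
	uint16_t pdu_len;	/* payload length following the record */
};

#define BLK_IO_TRACE_MAGIC	0x65617400u

int main(int argc, char *argv[])
{
	struct blk_io_trace t;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: %s <saved-relay-buffer>\n", argv[0]);
		return 1;
	}

	while (fread(&t, sizeof(t), 1, f) == 1) {
		/* upper 24 bits are the magic, low 8 bits the version */
		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			fprintf(stderr, "bad magic, stream out of sync\n");
			break;
		}
		printf("seq=%u act=0x%x sector=%llu bytes=%u err=%u\n",
		       (unsigned) t.sequence, (unsigned) t.action,
		       (unsigned long long) t.sector,
		       (unsigned) t.bytes, (unsigned) t.error);
		/* skip the trailing payload, if any */
		if (t.pdu_len && fseek(f, t.pdu_len, SEEK_CUR) < 0)
			break;
	}

	fclose(f);
	return 0;
}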