View Incident:
http://co-op.engr.sgi.com/BugWorks/code/bwxquery.cgi?search=Search&wlong=1&view_type=Bug&wi=796141
Submitter         : dxm          Submitter Domain : engr
Assigned Engineer : btg          Assigned Domain  : sgi.com
Assigned Group    : xfs-linux    Category         : software
Customer Reported : F            Priority         : 3
Project           : xfs-linux    Status           : open
Description :
(Russell & I have been discussing this one - just wanted to
keep the info central)
If a buffer is allocated with a certain size and the actual
number of bytes to read/write is then set to a smaller value
with XFS_BUF_SET_COUNT, pagebuf ignores the smaller value and
reads/writes the whole buffer.
This only occurs in XFS in the log code, specifically in
xlog_bread, xlog_bwrite, xlog_write_log_records and
xlog_clear_stale_blocks.
This is due to pb_count_desired not being handled properly.
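For reference, the pattern that trips this looks roughly like the
sketch below. It is only illustrative of how the log code drives
pagebuf - not a verbatim excerpt of xlog_bread - and the helpers other
than XFS_BUF_SET_COUNT (xlog_get_bp, XFS_BUF_SET_ADDR, XFS_BUF_READ,
BBTOB, xfsbdstrat, xfs_iowait) are just shorthand for the usual buffer
setup / strategy / wait sequence:

    /* illustrative sketch only, not the shipped xlog_bread() */
    bp = xlog_get_bp(bufblks, log->l_mp);   /* buffer sized for bufblks blocks */

    XFS_BUF_SET_ADDR(bp, blk_no);
    XFS_BUF_READ(bp);
    XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));   /* nbblks < bufblks: read less ...  */

    xfsbdstrat(log->l_mp, bp);              /* ... but pagebuf still issues I/O */
    xfs_iowait(bp);                         /* for the full pb_buffer_length    */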
Attached is a fix that works for me - I haven't checked it
in due to repercussions:
*** /usr/tmp/p_rdiff_a0fNp4/xfs_buf.h Thu Jul 13 11:40:12 2000
--- xfs_buf.h Thu Jul 13 10:45:38 2000
***************
*** 456,462 ****
((bp)->pb_count_desired = cnt)
#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) \
! ((bp)->pb_buffer_length = cnt)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
--- 456,462 ----
((bp)->pb_count_desired = cnt)
#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) \
! ((bp)->pb_count_desired = (bp)->pb_buffer_length = cnt)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
*** /usr/tmp/p_rdiff_a0gEie/page_buf.c Thu Jul 13 11:52:43 2000
--- page_buf.c Thu Jul 13 11:52:03 2000
***************
*** 76,81 ****
--- 76,82 ----
#include <linux/locks.h>
#include <linux/swap.h>
#include <asm/hardirq.h>
+ #include <asm/kdb.h>
MODULE_PARM(debug, "i");
MODULE_PARM(pagebuf_max_pin, "i");
***************
*** 277,283 ****
PBP(pb)->pb_last_holder = current;
pb->pb_target = ip;
pb->pb_file_offset = range_base;
! pb->pb_buffer_length = range_length;
pb->pb_flags = flags | PBF_NONE;
pb->pb_bn = PAGE_BUF_DADDR_NULL;
atomic_set(&PBP(pb)->pb_pin_count, 0);
--- 278,284 ----
PBP(pb)->pb_last_holder = current;
pb->pb_target = ip;
pb->pb_file_offset = range_base;
! pb->pb_buffer_length = pb->pb_count_desired = range_length;
pb->pb_flags = flags | PBF_NONE;
pb->pb_bn = PAGE_BUF_DADDR_NULL;
atomic_set(&PBP(pb)->pb_pin_count, 0);
***************
*** 753,760 ****
if (PB_BMAP_FUNC(pb) == NULL) {
/* Use identity mapping */
pb->pb_bn = pb->pb_file_offset >> PB_SECTOR_BITS(pb);
- pb->pb_count_desired =
- pb->pb_buffer_length & ~(PB_SECTOR_SIZE(pb) - 1);
}
if (flags & PBF_READ) {
--- 754,759 ----
***************
*** 832,840 ****
}
kbp->locked = 0;
! pb->pb_buffer_length = len;
! pb->pb_count_desired =
! pb->pb_buffer_length & ~(PB_SECTOR_SIZE(pb) - 1);
pb->pb_flags |= PBF_MAPPED;
return 0;
--- 831,837 ----
}
kbp->locked = 0;
! pb->pb_buffer_length = pb->pb_count_desired = len;
pb->pb_flags |= PBF_MAPPED;
return 0;
***************
*** 1201,1208 ****
if (PB_BMAP_FUNC(pb) == NULL) {
/* Use identity mapping */
pb->pb_bn = pb->pb_file_offset >> PB_SECTOR_BITS(pb);
- pb->pb_count_desired =
- pb->pb_buffer_length & ~(PB_SECTOR_SIZE(pb) - 1);
pb->pb_count_residual = pb->pb_count_desired;
--- 1198,1203 ----
***************
*** 1935,1946 ****
int status = 0;
int sval;
loff_t buffer_offset = pb->pb_file_offset;
! size_t buffer_len = pb->pb_buffer_length;
size_t page_offset;
size_t len;
size_t total = 0;
size_t cur_offset;
size_t cur_len;
#if CONFIG_KIOBUF_IO
loff_t pb_offset;
unsigned long blocknr;
--- 1930,1942 ----
int status = 0;
int sval;
loff_t buffer_offset = pb->pb_file_offset;
! size_t buffer_len;
size_t page_offset;
size_t len;
size_t total = 0;
size_t cur_offset;
size_t cur_len;
+
#if CONFIG_KIOBUF_IO
loff_t pb_offset;
unsigned long blocknr;
***************
*** 1958,1963 ****
--- 1954,1968 ----
}
#endif
+
+ if (pb->pb_count_desired != pb->pb_buffer_length) {
+ printk("pagebuf_segment_apply length:%d
count:%d\n",pb->pb_buffer_length,pb->pb_count_desired);
+
+ if (!pb->pb_count_desired) KDB_ENTER();
+ }
+
+ buffer_len = pb->pb_count_desired?pb->pb_count_desired:pb->pb_buffer_length;
+
pagebuf_hold(pb);
for (vec_index = 0; vec_index < pb->pb_mem.pba_kiocnt; vec_index++) {
if (buffer_len == 0)
***************
*** 1970,1994 ****
cur_offset = kb->offset;
cur_len = kb->length;
if (cur_len > buffer_len) {
- /*
- * This is a BUG! Per the kiovec allocation code,
- * we shouldn't have kb->length != pb_buffer_length.
- * And we only ever allocate a single kiobuf in a
- * kiovec; so this condition should never be triggered.
- * Lets see if I'm wrong.
- * -Chait.
- */
cur_len = buffer_len;
! /*
! * So, if we decide to unilaterally modify the amount
! * of I/O done thus, then kb->length would be invalid!
! * Not kosher, since kiobuf semantics indicate that
! * kb->length is the amount of I/O to be performed
! * against this kiobuf.
! * -Chait.
! */
! BUG();
}
#if CONFIG_KIOBUF_IO
/* Works for scsi-disks, else get back -ENOSYS */
if ((func == _page_buf_page_apply) && error == 0) {
--- 1975,1984 ----
cur_offset = kb->offset;
cur_len = kb->length;
if (cur_len > buffer_len) {
cur_len = buffer_len;
! printk("pagebuf_segment_apply count changed to
%d\n",cur_len);
}
+
#if CONFIG_KIOBUF_IO
/* Works for scsi-disks, else get back -ENOSYS */
if ((func == _page_buf_page_apply) && error == 0) {
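To summarise the patch (this is a paraphrase of the hunks above, not
additional code): everywhere pb_buffer_length is set - XFS_BUF_SET_SIZE
and the two assignments in page_buf.c - pb_count_desired is now set
alongside it, and the old sector-size rounding of pb_count_desired in
the identity-mapping paths is dropped. pagebuf_segment_apply then sizes
the I/O from pb_count_desired rather than pb_buffer_length, falling
back to the full length only if no count was ever set:

    buffer_len = pb->pb_count_desired ? pb->pb_count_desired
                                      : pb->pb_buffer_length;

The printk and KDB_ENTER additions are debugging instrumentation to
catch any path where the two fields still disagree or where the desired
count ends up zero.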