diff -urN linux-2.4.3-XFS.orig/fs/pagebuf/page_buf.c linux-2.4.3-XFS/fs/pagebuf/page_buf.c
--- linux-2.4.3-XFS.orig/fs/pagebuf/page_buf.c	Sat Apr  7 11:21:03 2001
+++ linux-2.4.3-XFS/fs/pagebuf/page_buf.c	Sat Apr  7 11:25:50 2001
@@ -1342,7 +1342,12 @@
 		bh = kmem_cache_alloc(bh_cachep, SLAB_PAGE_IO);
 		if (bh == NULL){
 			err = -ENOMEM;
-			goto error;
+			/* If we ever do get here then clean up what we already did */
+			for (itr=0; itr < cnt; itr++) {
+				atomic_set_buffer_clean (bufferlist[itr]);
+				bufferlist[itr]->b_end_io(bufferlist[itr], 0);
+			}
+			return err;
 		}
 		memset(bh, 0, sizeof(*bh));
 		init_waitqueue_head(&bh->b_wait);
@@ -1389,13 +1394,6 @@
 	}
 
 	return err;
-error:
-	/* If we ever do get here then clean up what we already did */
-	for (itr=0; itr < cnt; itr++) {
-		atomic_set_buffer_clean (bufferlist[itr]);
-		bufferlist[itr]->b_end_io(bufferlist[itr], 0);
-	}
-	return err;
 }
 
 /* Apply function for pagebuf_segment_apply */
@@ -1817,26 +1815,25 @@
 	/* Bounce ... ouch! */
 	*error = setup_kiobuf_bounce_pages(kb, GFP_USER);
-	if (*error)
-		goto error;
+
+	/*
+	 * Release locked pages on error. Failed retries would have
+	 * reset pgcnt and have released all the locks. So do nothing
+	 * for that case. Also release any allocated bounce buffers.
+	 */
+	if (*error) {
+		cleanup_bounce_buffers(rw, 1, &kb, -1);
+		if (locking)
+			unlock_kiovec(1, &kb, kb->nr_pages);
+		return;
+	}
+
 	if (rw & WRITE)
 		kiobuf_copy_bounce(kb, COPY_TO_BOUNCE, -1);
 
 	kb->end_io = pagebuf_end_kiobuf_io;
 	kb->errno = 0;
 	ll_rw_kio(rw, kb, dev, blocknr, blksize, error);
-
-	/*
-	 * Release locked pages on error. Failed retries would have
-	 * reset pgcnt and have released all the locks. So do nothing
-	 * for that case. Also release any allocated bounce buffers.
-	 */
-error:
-	if (*error) {
-		cleanup_bounce_buffers(rw, 1, &kb, -1);
-		if (locking)
-			unlock_kiovec(1, &kb, kb->nr_pages);
-	}
 }
 
 #endif
 