
Re: [dm-devel] [PATCH 23/42] md/raid: set bi_op to REQ_OP

To: mchristi@xxxxxxxxxx
Subject: Re: [dm-devel] [PATCH 23/42] md/raid: set bi_op to REQ_OP
From: Shaun Tancheff <shaun.tancheff@xxxxxxxxxxx>
Date: Sat, 23 Apr 2016 18:26:34 -0500
Cc: linux-f2fs-devel@xxxxxxxxxxxxxxxxxxxxx, linux-ext4@xxxxxxxxxxxxxxx, konrad.wilk@xxxxxxxxxx, drbd-dev@xxxxxxxxxxxxxxxx, philipp.reisner@xxxxxxxxxx, lars.ellenberg@xxxxxxxxxx, linux-raid@xxxxxxxxxxxxxxx, dm-devel@xxxxxxxxxx, linux-fsdevel@xxxxxxxxxxxxxxx, linux-bcache@xxxxxxxxxxxxxxx, linux-block@xxxxxxxxxxxxxxx, LKML <linux-kernel@xxxxxxxxxxxxxxx>, linux-scsi@xxxxxxxxxxxxxxx, linux-mtd@xxxxxxxxxxxxxxxxxxx, target-devel@xxxxxxxxxxxxxxx, linux-btrfs@xxxxxxxxxxxxxxx, osd-dev@xxxxxxxxxxxx, xfs@xxxxxxxxxxx, ocfs2-devel@xxxxxxxxxxxxxx
In-reply-to: <1460716802-2294-24-git-send-email-mchristi@xxxxxxxxxx>
References: <1460716802-2294-1-git-send-email-mchristi@xxxxxxxxxx> <1460716802-2294-24-git-send-email-mchristi@xxxxxxxxxx>
On Fri, Apr 15, 2016 at 5:39 AM, <mchristi@xxxxxxxxxx> wrote:
From: Mike Christie <mchristi@xxxxxxxxxx>

This patch has md use bio->bi_op for REQ_OPs and rq_flag_bits
to bio->bi_rw.

Signed-off-by: Mike Christie <mchristi@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxxx>
---
 drivers/md/bitmap.c      |  2 +-
 drivers/md/dm-raid.c     |  5 +++--
 drivers/md/md.c          | 11 +++++++----
 drivers/md/md.h          |  3 ++-
 drivers/md/raid1.c       | 34 ++++++++++++++++----------------
 drivers/md/raid10.c      | 50 ++++++++++++++++++++++++++----------------------
 drivers/md/raid5-cache.c | 25 +++++++++++++++---------
 drivers/md/raid5.c       | 48 ++++++++++++++++++++++++++--------------------
 8 files changed, 101 insertions(+), 77 deletions(-)


Sorry, I thought this would thread properly:

In raid0.c, raid10.c, and raid5.c:

A couple of checks for the REQ_PREFLUSH flag should also check
whether bi_op matches REQ_OP_FLUSH.
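
To be concrete, the kind of combined test I have in mind is roughly
the following (illustrative only -- the helper and its name are mine,
not something in this series):

    /*
     * Hypothetical helper: with the op/flag split a flush can show up
     * either as the bio's op or as a flag in bi_rw, so test both.
     */
    static inline bool bio_is_flush(struct bio *bio)
    {
            return bio->bi_op == REQ_OP_FLUSH || (bio->bi_rw & REQ_PREFLUSH);
    }

The REQ_PREFLUSH-only tests in those three drivers would then keep
catching flushes once they arrive with bi_op set to REQ_OP_FLUSH.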

In raid1.c [r1_sync_page_io()] and raid10.c [r10_sync_page_io()]:

These wrappers around sync_page_io() are still passed READ/WRITE but
need to be passed REQ_OP_READ and REQ_OP_WRITE.
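
For example, one of the call sites (raid1.c's fix_read_error(), shown
as a sketch rather than an exact diff against this series):

    /* before: legacy rw value forwarded straight into sync_page_io() */
    r1_sync_page_io(rdev, sect, s, conf->tmppage, WRITE);

    /* after: the REQ_OP_* value the new sync_page_io() prototype expects */
    r1_sync_page_io(rdev, sect, s, conf->tmppage, REQ_OP_WRITE);

r10_sync_page_io() has the same shape, so its callers in raid10.c need
the same substitution.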

Anyway, my raid testing was hitting weird hangs and corruption
without the patch.

Thanks!

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 8b2e16f..9e8019e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -159,7 +159,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,

        if (sync_page_io(rdev, target,
                         roundup(size, bdev_logical_block_size(rdev->bdev)),
-                        page, READ, true)) {
+                        page, REQ_OP_READ, 0, true)) {
            page->index = index;
            return 0;
        }
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a090121..43a749c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -792,7 +792,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
    if (rdev->sb_loaded)
        return 0;

-       if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+       if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
        DMERR("Failed to read superblock of device at position %d",
           rdev->raid_disk);
        md_error(rdev->mddev, rdev);
@@ -1646,7 +1646,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
    for (i = 0; i < rs->md.raid_disks; i++) {
        r = &rs->dev[i].rdev;
        if (test_bit(Faulty, &r->flags) && r->sb_page &&
-           sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+           sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
+                        1)) {
            DMINFO("Faulty %s device #%d has readable super block."
                   " Attempting to revive it.",
                   rs->raid_type->name, i);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ec3c98d..9c40368 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -392,6 +392,7 @@ static void submit_flushes(struct work_struct *ws)
            bi->bi_end_io = md_end_flush;
            bi->bi_private = rdev;
            bi->bi_bdev = rdev->bdev;
+            bi->bi_op = REQ_OP_WRITE;
            bi->bi_rw = WRITE_FLUSH;
            atomic_inc(&mddev->flush_pending);
            submit_bio(bi);
@@ -741,6 +742,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
    bio_add_page(bio, page, size, 0);
    bio->bi_private = rdev;
    bio->bi_end_io = super_written;
+    bio->bi_op = REQ_OP_WRITE;
    bio->bi_rw = WRITE_FLUSH_FUA;

    atomic_inc(&mddev->pending_writes);
@@ -754,14 +756,15 @@ void md_super_wait(struct mddev *mddev)
 }

 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                struct page *page, int rw, bool metadata_op)
+                struct page *page, int op, int op_flags, bool metadata_op)
 {
    struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
    int ret;

    bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
        rdev->meta_bdev : rdev->bdev;
-    bio->bi_rw = rw;
+    bio->bi_op = op;
+    bio->bi_rw = op_flags;
    if (metadata_op)
        bio->bi_iter.bi_sector = sector + rdev->sb_start;
    else if (rdev->mddev->reshape_position != MaxSector &&
@@ -787,7 +790,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
    if (rdev->sb_loaded)
        return 0;

-    if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
+    if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
        goto fail;
    rdev->sb_loaded = 1;
    return 0;
@@ -1473,7 +1476,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
            return -EINVAL;
        bb_sector = (long long)offset;
        if (!sync_page_io(rdev, bb_sector, sectors << 9,
-                         rdev->bb_page, READ, true))
+                         rdev->bb_page, REQ_OP_READ, 0, true))
            return -EIO;
        bbp = (u64 *)page_address(rdev->bb_page);
        rdev->badblocks.shift = sb->bblog_shift;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b5c4be7..2e0918f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                           sector_t sector, int size, struct page *page);
 extern void md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                       struct page *page, int rw, bool metadata_op);
+                       struct page *page, int op, int op_flags,
+                       bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
 extern int md_allow_write(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 424df7e..c7abd2d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf)
        while (bio) { /* submit pending writes */
            struct bio *next = bio->bi_next;
            bio->bi_next = NULL;
-                       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+                       if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&
              !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                /* Just ignore it */
                bio_endio(bio);
@@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
    while (bio) { /* submit pending writes */
        struct bio *next = bio->bi_next;
        bio->bi_next = NULL;
-               if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&
          !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
            /* Just ignore it */
            bio_endio(bio);
@@ -1053,12 +1053,11 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
    int i, disks;
    struct bitmap *bitmap;
    unsigned long flags;
+    const int op = bio->bi_op;
    const int rw = bio_data_dir(bio);
    const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
    const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
-    const unsigned long do_discard = (bio->bi_rw
-                                      & (REQ_DISCARD | REQ_SECURE));
-    const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+    const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
    struct md_rdev *blocked_rdev;
    struct blk_plug_cb *cb;
    struct raid1_plug_cb *plug = NULL;
@@ -1166,7 +1165,8 @@ read_again:
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               read_bio->bi_op = op;
+               read_bio->bi_rw = do_sync;
        read_bio->bi_private = r1_bio;

        if (max_sectors < r1_bio->sectors) {
@@ -1376,8 +1376,9 @@ read_again:
                          conf->mirrors[i].rdev->data_offset);
        mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
        mbio->bi_end_io = raid1_end_write_request;
+               mbio->bi_op = op;
                mbio->bi_rw =
-                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
+                       do_flush_fua | do_sync | do_sec;
        mbio->bi_private = r1_bio;

        atomic_inc(&r1_bio->remaining);
@@ -1771,7 +1772,7 @@ static void end_sync_write(struct bio *bio)
 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
                            int sectors, struct page *page, int rw)
 {
-       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
        /* success */
        return 1;
    if (rw == WRITE) {
@@ -1825,7 +1826,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                rdev = conf->mirrors[d].rdev;
                if (sync_page_io(rdev, sect, s<<9,
                                 bio->bi_io_vec[idx].bv_page,
-                                READ, false)) {
+                                REQ_OP_READ, 0, false)) {
                    success = 1;
                    break;
                }
@@ -2030,7 +2031,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                   !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
                        continue;

-               wbio->bi_rw = WRITE;
+               wbio->bi_op = REQ_OP_WRITE;
        wbio->bi_end_io = end_sync_write;
        atomic_inc(&r1_bio->remaining);
        md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2090,7 +2091,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
              is_badblock(rdev, sect, s,
                    &first_bad, &bad_sectors) == 0 &&
              sync_page_io(rdev, sect, s<<9,
-                            conf->tmppage, READ, false))
+                            conf->tmppage, REQ_OP_READ, 0, false))
                success = 1;
            else {
                d++;
@@ -2201,7 +2202,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
            wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
        }

-               wbio->bi_rw = WRITE;
+               wbio->bi_op = REQ_OP_WRITE;
        wbio->bi_iter.bi_sector = r1_bio->sector;
        wbio->bi_iter.bi_size = r1_bio->sectors << 9;

@@ -2344,7 +2345,8 @@ read_more:
        bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
        bio->bi_bdev = rdev->bdev;
        bio->bi_end_io = raid1_end_read_request;
-               bio->bi_rw = READ | do_sync;
+               bio->bi_op = REQ_OP_READ;
+               bio->bi_rw = do_sync;
        bio->bi_private = r1_bio;
        if (max_sectors < r1_bio->sectors) {
            /* Drat - have to split this up more */
@@ -2572,7 +2574,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
            if (i < conf->raid_disks)
                still_degraded = 1;
        } else if (!test_bit(In_sync, &rdev->flags)) {
-                       bio->bi_rw = WRITE;
+                       bio->bi_op = REQ_OP_WRITE;
            bio->bi_end_io = end_sync_write;
            write_targets ++;
        } else {
@@ -2599,7 +2601,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                    if (disk < 0)
                        disk = i;
                }
-                               bio->bi_rw = READ;
+                               bio->bi_op = REQ_OP_READ;
                bio->bi_end_io = end_sync_read;
                read_targets++;
            } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -2611,7 +2613,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                 * if we are doing resync or repair. Otherwise, leave
                                 * this device alone for this sync request.
                                 */
-                               bio->bi_rw = WRITE;
+                               bio->bi_op = REQ_OP_WRITE;
                bio->bi_end_io = end_sync_write;
                write_targets++;
            }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4736be8..63cd985 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf)
        while (bio) { /* submit pending writes */
            struct bio *next = bio->bi_next;
            bio->bi_next = NULL;
-                       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+                       if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&
              !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                /* Just ignore it */
                bio_endio(bio);
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
    while (bio) { /* submit pending writes */
        struct bio *next = bio->bi_next;
        bio->bi_next = NULL;
-               if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&
          !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
            /* Just ignore it */
            bio_endio(bio);
@@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
    struct r10bio *r10_bio;
    struct bio *read_bio;
    int i;
+    const int op = bio->bi_op;
    const int rw = bio_data_dir(bio);
    const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
    const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
-    const unsigned long do_discard = (bio->bi_rw
-                                      & (REQ_DISCARD | REQ_SECURE));
-    const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+    const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
    unsigned long flags;
    struct md_rdev *blocked_rdev;
    struct blk_plug_cb *cb;
@@ -1156,7 +1155,8 @@ read_again:
            choose_data_offset(r10_bio, rdev);
        read_bio->bi_bdev = rdev->bdev;
        read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               read_bio->bi_op = op;
+               read_bio->bi_rw = do_sync;
        read_bio->bi_private = r10_bio;

        if (max_sectors < r10_bio->sectors) {
@@ -1363,8 +1363,9 @@ retry_write:
                               rdev));
            mbio->bi_bdev = rdev->bdev;
            mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_op = op;
                        mbio->bi_rw =
-                               WRITE | do_sync | do_fua | do_discard | do_same;
+                               do_sync | do_fua | do_sec;
            mbio->bi_private = r10_bio;

            atomic_inc(&r10_bio->remaining);
@@ -1406,8 +1407,9 @@ retry_write:
                                          r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_op = op;
                        mbio->bi_rw =
-                               WRITE | do_sync | do_fua | do_discard | do_same;
+                               do_sync | do_fua | do_sec;
            mbio->bi_private = r10_bio;

            atomic_inc(&r10_bio->remaining);
@@ -1992,7 +1994,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)

        tbio->bi_vcnt = vcnt;
        tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
-               tbio->bi_rw = WRITE;
+               tbio->bi_op = REQ_OP_WRITE;
        tbio->bi_private = r10_bio;
        tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
        tbio->bi_end_io = end_sync_write;
@@ -2078,7 +2080,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
                 addr,
                 s << 9,
                 bio->bi_io_vec[idx].bv_page,
-                        READ, false);
+                        REQ_OP_READ, 0, false);
        if (ok) {
            rdev = conf->mirrors[dw].rdev;
            addr = r10_bio->devs[1].addr + sect;
@@ -2086,7 +2088,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
                     addr,
                     s << 9,
                     bio->bi_io_vec[idx].bv_page,
-                            WRITE, false);
+                            REQ_OP_WRITE, 0, false);
            if (!ok) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement,
@@ -2213,7 +2215,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
    if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
      && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
        return -1;
-       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
        /* success */
        return 1;
    if (rw == WRITE) {
@@ -2299,7 +2301,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                               r10_bio->devs[sl].addr +
                                               sect,
                                               s<<9,
-                                              conf->tmppage, READ, false);
+                                              conf->tmppage,
+                                              REQ_OP_READ, 0, false);
                rdev_dec_pending(rdev, mddev);
                rcu_read_lock();
                if (success)
@@ -2474,7 +2477,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                           choose_data_offset(r10_bio, rdev) +
                           (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
-               wbio->bi_rw = WRITE;
+               wbio->bi_op = REQ_OP_WRITE;

        if (submit_bio_wait(wbio) < 0)
            /* Failure! */
@@ -2550,7 +2553,8 @@ read_more:
    bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
        + choose_data_offset(r10_bio, rdev);
    bio->bi_bdev = rdev->bdev;
-       bio->bi_rw = READ | do_sync;
+       bio->bi_op = REQ_OP_READ;
+       bio->bi_rw = do_sync;
    bio->bi_private = r10_bio;
    bio->bi_end_io = raid10_end_read_request;
    if (max_sectors < r10_bio->sectors) {
@@ -3040,7 +3044,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                biolist = bio;
                bio->bi_private = r10_bio;
                bio->bi_end_io = end_sync_read;
-                               bio->bi_rw = READ;
+                               bio->bi_op = REQ_OP_READ;
                from_addr = r10_bio->devs[j].addr;
                bio->bi_iter.bi_sector = from_addr +
                    rdev->data_offset;
@@ -3066,7 +3070,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                    biolist = bio;
                    bio->bi_private = r10_bio;
                    bio->bi_end_io = end_sync_write;
-                                       bio->bi_rw = WRITE;
+                                       bio->bi_op = REQ_OP_WRITE;
                    bio->bi_iter.bi_sector = to_addr
                        + rdev->data_offset;
                    bio->bi_bdev = rdev->bdev;
@@ -3095,7 +3099,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                biolist = bio;
                bio->bi_private = r10_bio;
                bio->bi_end_io = end_sync_write;
-                               bio->bi_rw = WRITE;
+                               bio->bi_op = REQ_OP_WRITE;
                bio->bi_iter.bi_sector = to_addr +
                    rdev->data_offset;
                bio->bi_bdev = rdev->bdev;
@@ -3215,7 +3219,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
            biolist = bio;
            bio->bi_private = r10_bio;
            bio->bi_end_io = end_sync_read;
-                       bio->bi_rw = READ;
+                       bio->bi_op = REQ_OP_READ;
            bio->bi_iter.bi_sector = sector +
                conf->mirrors[d].rdev->data_offset;
            bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -3237,7 +3241,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
            biolist = bio;
            bio->bi_private = r10_bio;
            bio->bi_end_io = end_sync_write;
-                       bio->bi_rw = WRITE;
+                       bio->bi_op = REQ_OP_WRITE;
            bio->bi_iter.bi_sector = sector +
                conf->mirrors[d].replacement->data_offset;
            bio->bi_bdev = conf->mirrors[d].replacement->bdev;
@@ -4320,7 +4324,7 @@ read_more:
                       + rdev->data_offset);
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
-       read_bio->bi_rw = READ;
+       read_bio->bi_op = REQ_OP_READ;
    read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
    read_bio->bi_error = 0;
    read_bio->bi_vcnt = 0;
@@ -4354,7 +4358,7 @@ read_more:
            rdev2->new_data_offset;
        b->bi_private = r10_bio;
        b->bi_end_io = end_reshape_write;
-               b->bi_rw = WRITE;
+               b->bi_op = REQ_OP_WRITE;
        b->bi_next = blist;
        blist = b;
    }
@@ -4522,7 +4526,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
                                       addr,
                                       s << 9,
                                       bvec[idx].bv_page,
-                                      READ, false);
+                                      REQ_OP_READ, 0, false);
            if (success)
                break;
        failed:
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 90c2618..56b20c3 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -261,7 +261,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

-       bio->bi_rw = WRITE;
+       bio->bi_op = REQ_OP_WRITE;
    bio->bi_bdev = log->rdev->bdev;
    bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

@@ -686,6 +686,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
    bio_reset(&log->flush_bio);
    log->flush_bio.bi_bdev = log->rdev->bdev;
    log->flush_bio.bi_end_io = r5l_log_flush_endio;
+       log->flush_bio.bi_op = REQ_OP_WRITE;
        log->flush_bio.bi_rw = WRITE_FLUSH;
        submit_bio(&log->flush_bio);
 }
@@ -882,7 +883,8 @@ static int r5l_read_meta_block(struct r5l_log *log,
    struct r5l_meta_block *mb;
    u32 crc, stored_crc;

-       if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+       if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
+                         false))
        return -EIO;

    mb = page_address(page);
@@ -927,7 +929,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                          &disk_index, sh);

                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-                                    sh->dev[disk_index].page, READ, false);
+                                    sh->dev[disk_index].page, REQ_OP_READ, 0,
+                                    false);
            sh->dev[disk_index].log_checksum =
                le32_to_cpu(payload->checksum[0]);
            set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -935,7 +938,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
        } else {
            disk_index = sh->pd_idx;
            sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-                                    sh->dev[disk_index].page, READ, false);
+                                    sh->dev[disk_index].page, REQ_OP_READ, 0,
+                                    false);
            sh->dev[disk_index].log_checksum =
                le32_to_cpu(payload->checksum[0]);
            set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -945,7 +949,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
-                                            READ, false);
+                                            REQ_OP_READ, 0, false);
                sh->dev[disk_index].log_checksum =
                    le32_to_cpu(payload->checksum[1]);
                set_bit(R5_Wantwrite,
@@ -987,11 +991,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
        rdev = rcu_dereference(conf->disks[disk_index].rdev);
        if (rdev)
            sync_page_io(rdev, stripe_sect, PAGE_SIZE,
-                            sh->dev[disk_index].page, WRITE, false);
+                            sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+                            false);
        rrdev = rcu_dereference(conf->disks[disk_index].replacement);
        if (rrdev)
            sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
-                            sh->dev[disk_index].page, WRITE, false);
+                            sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+                            false);
    }
    raid5_release_stripe(sh);
    return 0;
@@ -1063,7 +1069,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
    crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
    mb->checksum = cpu_to_le32(crc);

-       if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+       if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+                         WRITE_FUA, false)) {
        __free_page(page);
        return -EIO;
    }
@@ -1138,7 +1145,7 @@ static int r5l_load_log(struct r5l_log *log)
    if (!page)
        return -ENOMEM;

-       if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+       if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
        ret = -EIO;
        goto ioerr;
    }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65..c36b817 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
    dd_idx = 0;
    while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
        dd_idx++;
-       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+           head->dev[dd_idx].towrite->bi_op != sh->dev[dd_idx].towrite->bi_op)
        goto unlock_out;

    if (head->batch_head) {
@@ -891,29 +892,32 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
    if (r5l_write_stripe(conf->log, sh) == 0)
        return;
    for (i = disks; i--; ) {
-               int rw;
+               int op;
+               int op_flags = 0;
        int replace_only = 0;
        struct bio *bi, *rbi;
        struct md_rdev *rdev, *rrdev = NULL;

        sh = head_sh;
        if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
-                       if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-                               rw = WRITE_FUA;
-                       else
-                               rw = WRITE;
+                       if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) {
+                               op = REQ_OP_WRITE;
+                               op_flags = WRITE_FUA;
+                       } else {
+                               op = REQ_OP_WRITE;
+                       }
                        if (test_bit(R5_Discard, &sh->dev[i].flags))
-                               rw |= REQ_DISCARD;
+                               op = REQ_OP_DISCARD;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-                       rw = READ;
+                       op = REQ_OP_READ;
        else if (test_and_clear_bit(R5_WantReplace,
                      &sh->dev[i].flags)) {
-                       rw = WRITE;
+                       op = REQ_OP_WRITE;
            replace_only = 1;
        } else
            continue;
        if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
-                       rw |= REQ_SYNC;
+                       op_flags |= REQ_SYNC;

 again:
        bi = &sh->dev[i].req;
@@ -927,7 +931,7 @@ again:
            rdev = rrdev;
            rrdev = NULL;
        }
-               if (rw & WRITE) {
+               if (op_is_write(op)) {
            if (replace_only)
                rdev = NULL;
            if (rdev == rrdev)
@@ -953,7 +957,7 @@ again:
                 * need to check for writes. We never accept write errors
                 * on the replacement, so we don't to check rrdev.
                 */
-               while ((rw & WRITE) && rdev &&
+               while (op_is_write(op) && rdev &&
                       test_bit(WriteErrorSeen, &rdev->flags)) {
            sector_t first_bad;
            int bad_sectors;
@@ -995,8 +999,9 @@ again:

            bio_reset(bi);
            bi->bi_bdev = rdev->bdev;
-                       bi->bi_rw = rw;
-                       bi->bi_end_io = (rw & WRITE)
+                       bi->bi_op = op;
+                       bi->bi_rw = op_flags;
+                       bi->bi_end_io = op_is_write(op)
                ? raid5_end_write_request
                : raid5_end_read_request;
            bi->bi_private = sh;
@@ -1027,7 +1032,7 @@ again:
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                bi->bi_vcnt = 0;
            if (rrdev)
                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1047,8 +1052,9 @@ again:

            bio_reset(rbi);
            rbi->bi_bdev = rrdev->bdev;
-                       rbi->bi_rw = rw;
-                       BUG_ON(!(rw & WRITE));
+                       rbi->bi_op = op;
+                       rbi->bi_rw = op_flags;
+                       BUG_ON(!op_is_write(op));
            rbi->bi_end_io = raid5_end_write_request;
            rbi->bi_private = sh;

@@ -1076,7 +1082,7 @@ again:
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                rbi->bi_vcnt = 0;
            if (conf->mddev->gendisk)
                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,7 +1091,7 @@ again:
            generic_make_request(rbi);
        }
        if (!rdev && !rrdev) {
-                       if (rw & WRITE)
+                       if (op_is_write(op))
                set_bit(STRIPE_DEGRADED, &sh->state);
            pr_debug("skip op %ld on disc %d for sector %llu\n",
                bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -1623,7 +1629,7 @@ again:
                    set_bit(R5_WantFUA, &dev->flags);
                if (wbi->bi_rw & REQ_SYNC)
                    set_bit(R5_SyncIO, &dev->flags);
-                               if (wbi->bi_rw & REQ_DISCARD)
+                               if (wbi->bi_op == REQ_OP_DISCARD)
                    set_bit(R5_Discard, &dev->flags);
                else {
                    tx = async_copy_data(1, wbi, &dev->page,
@@ -5178,7 +5184,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
            return;
    }

-       if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+       if (unlikely(bi->bi_op == REQ_OP_DISCARD)) {
        make_discard_request(mddev, bi);
        return;
    }
--
2.7.2




--
Shaun Tancheff
