mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/
synced 2025-04-19 20:58:31 +09:00
md/md-bitmap: remove the last parameter for bitmap_ops->endwrite()
For the case that IO failed for one rdev, the bit will be marked as NEEDED in the following cases: 1) If badblocks is set and the rdev is not faulty; 2) If the rdev is faulty. Case 1) is useless because synchronizing data to badblocks makes no sense. Case 2) can be replaced with mddev->degraded. Also remove R1BIO_Degraded, R10BIO_Degraded and STRIPE_DEGRADED since case 2) no longer uses them. Signed-off-by: Yu Kuai <yukuai3@huawei.com> Link: https://lore.kernel.org/r/20250109015145.158868-3-yukuai1@huaweicloud.com Signed-off-by: Song Liu <song@kernel.org>
This commit is contained in:
parent
08c50142a1
commit
4f0e7d0e03
@ -1726,7 +1726,7 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
|
||||
}
|
||||
|
||||
static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
|
||||
unsigned long sectors, bool success)
|
||||
unsigned long sectors)
|
||||
{
|
||||
struct bitmap *bitmap = mddev->bitmap;
|
||||
|
||||
@ -1745,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
|
||||
return;
|
||||
}
|
||||
|
||||
if (success && !bitmap->mddev->degraded &&
|
||||
bitmap->events_cleared < bitmap->mddev->events) {
|
||||
bitmap->events_cleared = bitmap->mddev->events;
|
||||
bitmap->need_sync = 1;
|
||||
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
|
||||
}
|
||||
|
||||
if (!success && !NEEDED(*bmc))
|
||||
if (!bitmap->mddev->degraded) {
|
||||
if (bitmap->events_cleared < bitmap->mddev->events) {
|
||||
bitmap->events_cleared = bitmap->mddev->events;
|
||||
bitmap->need_sync = 1;
|
||||
sysfs_notify_dirent_safe(
|
||||
bitmap->sysfs_can_clear);
|
||||
}
|
||||
} else if (!NEEDED(*bmc)) {
|
||||
*bmc |= NEEDED_MASK;
|
||||
}
|
||||
|
||||
if (COUNTER(*bmc) == COUNTER_MAX)
|
||||
wake_up(&bitmap->overflow_wait);
|
||||
|
@ -92,7 +92,7 @@ struct bitmap_operations {
|
||||
int (*startwrite)(struct mddev *mddev, sector_t offset,
|
||||
unsigned long sectors);
|
||||
void (*endwrite)(struct mddev *mddev, sector_t offset,
|
||||
unsigned long sectors, bool success);
|
||||
unsigned long sectors);
|
||||
bool (*start_sync)(struct mddev *mddev, sector_t offset,
|
||||
sector_t *blocks, bool degraded);
|
||||
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
|
||||
|
@ -423,8 +423,7 @@ static void close_write(struct r1bio *r1_bio)
|
||||
if (test_bit(R1BIO_BehindIO, &r1_bio->state))
|
||||
mddev->bitmap_ops->end_behind_write(mddev);
|
||||
/* clear the bitmap if all writes complete successfully */
|
||||
mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
|
||||
!test_bit(R1BIO_Degraded, &r1_bio->state));
|
||||
mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors);
|
||||
md_write_end(mddev);
|
||||
}
|
||||
|
||||
@ -481,8 +480,6 @@ static void raid1_end_write_request(struct bio *bio)
|
||||
if (!test_bit(Faulty, &rdev->flags))
|
||||
set_bit(R1BIO_WriteError, &r1_bio->state);
|
||||
else {
|
||||
/* Fail the request */
|
||||
set_bit(R1BIO_Degraded, &r1_bio->state);
|
||||
/* Finished with this branch */
|
||||
r1_bio->bios[mirror] = NULL;
|
||||
to_put = bio;
|
||||
@ -1536,11 +1533,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
||||
write_behind = true;
|
||||
|
||||
r1_bio->bios[i] = NULL;
|
||||
if (!rdev || test_bit(Faulty, &rdev->flags)) {
|
||||
if (i < conf->raid_disks)
|
||||
set_bit(R1BIO_Degraded, &r1_bio->state);
|
||||
if (!rdev || test_bit(Faulty, &rdev->flags))
|
||||
continue;
|
||||
}
|
||||
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
if (test_bit(WriteErrorSeen, &rdev->flags)) {
|
||||
@ -1559,16 +1553,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
||||
*/
|
||||
max_sectors = bad_sectors;
|
||||
rdev_dec_pending(rdev, mddev);
|
||||
/* We don't set R1BIO_Degraded as that
|
||||
* only applies if the disk is
|
||||
* missing, so it might be re-added,
|
||||
* and we want to know to recover this
|
||||
* chunk.
|
||||
* In this case the device is here,
|
||||
* and the fact that this chunk is not
|
||||
* in-sync is recorded in the bad
|
||||
* block log
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
if (is_bad) {
|
||||
@ -2616,12 +2600,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
|
||||
* errors.
|
||||
*/
|
||||
fail = true;
|
||||
if (!narrow_write_error(r1_bio, m)) {
|
||||
if (!narrow_write_error(r1_bio, m))
|
||||
md_error(conf->mddev,
|
||||
conf->mirrors[m].rdev);
|
||||
/* an I/O failed, we can't clear the bitmap */
|
||||
set_bit(R1BIO_Degraded, &r1_bio->state);
|
||||
}
|
||||
rdev_dec_pending(conf->mirrors[m].rdev,
|
||||
conf->mddev);
|
||||
}
|
||||
@ -2712,8 +2694,6 @@ static void raid1d(struct md_thread *thread)
|
||||
list_del(&r1_bio->retry_list);
|
||||
idx = sector_to_idx(r1_bio->sector);
|
||||
atomic_dec(&conf->nr_queued[idx]);
|
||||
if (mddev->degraded)
|
||||
set_bit(R1BIO_Degraded, &r1_bio->state);
|
||||
if (test_bit(R1BIO_WriteError, &r1_bio->state))
|
||||
close_write(r1_bio);
|
||||
raid_end_bio_io(r1_bio);
|
||||
|
@ -188,7 +188,6 @@ struct r1bio {
|
||||
enum r1bio_state {
|
||||
R1BIO_Uptodate,
|
||||
R1BIO_IsSync,
|
||||
R1BIO_Degraded,
|
||||
R1BIO_BehindIO,
|
||||
/* Set ReadError on bios that experience a readerror so that
|
||||
* raid1d knows what to do with them.
|
||||
|
@ -429,8 +429,7 @@ static void close_write(struct r10bio *r10_bio)
|
||||
struct mddev *mddev = r10_bio->mddev;
|
||||
|
||||
/* clear the bitmap if all writes complete successfully */
|
||||
mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
|
||||
!test_bit(R10BIO_Degraded, &r10_bio->state));
|
||||
mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors);
|
||||
md_write_end(mddev);
|
||||
}
|
||||
|
||||
@ -500,7 +499,6 @@ static void raid10_end_write_request(struct bio *bio)
|
||||
set_bit(R10BIO_WriteError, &r10_bio->state);
|
||||
else {
|
||||
/* Fail the request */
|
||||
set_bit(R10BIO_Degraded, &r10_bio->state);
|
||||
r10_bio->devs[slot].bio = NULL;
|
||||
to_put = bio;
|
||||
dec_rdev = 1;
|
||||
@ -1437,10 +1435,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
||||
r10_bio->devs[i].bio = NULL;
|
||||
r10_bio->devs[i].repl_bio = NULL;
|
||||
|
||||
if (!rdev && !rrdev) {
|
||||
set_bit(R10BIO_Degraded, &r10_bio->state);
|
||||
if (!rdev && !rrdev)
|
||||
continue;
|
||||
}
|
||||
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
|
||||
sector_t first_bad;
|
||||
sector_t dev_sector = r10_bio->devs[i].addr;
|
||||
@ -1457,14 +1453,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
||||
* to other devices yet
|
||||
*/
|
||||
max_sectors = bad_sectors;
|
||||
/* We don't set R10BIO_Degraded as that
|
||||
* only applies if the disk is missing,
|
||||
* so it might be re-added, and we want to
|
||||
* know to recover this chunk.
|
||||
* In this case the device is here, and the
|
||||
* fact that this chunk is not in-sync is
|
||||
* recorded in the bad block log.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
if (is_bad) {
|
||||
@ -2964,11 +2952,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
|
||||
rdev_dec_pending(rdev, conf->mddev);
|
||||
} else if (bio != NULL && bio->bi_status) {
|
||||
fail = true;
|
||||
if (!narrow_write_error(r10_bio, m)) {
|
||||
if (!narrow_write_error(r10_bio, m))
|
||||
md_error(conf->mddev, rdev);
|
||||
set_bit(R10BIO_Degraded,
|
||||
&r10_bio->state);
|
||||
}
|
||||
rdev_dec_pending(rdev, conf->mddev);
|
||||
}
|
||||
bio = r10_bio->devs[m].repl_bio;
|
||||
@ -3027,8 +3012,6 @@ static void raid10d(struct md_thread *thread)
|
||||
r10_bio = list_first_entry(&tmp, struct r10bio,
|
||||
retry_list);
|
||||
list_del(&r10_bio->retry_list);
|
||||
if (mddev->degraded)
|
||||
set_bit(R10BIO_Degraded, &r10_bio->state);
|
||||
|
||||
if (test_bit(R10BIO_WriteError,
|
||||
&r10_bio->state))
|
||||
|
@ -161,7 +161,6 @@ enum r10bio_state {
|
||||
R10BIO_IsSync,
|
||||
R10BIO_IsRecover,
|
||||
R10BIO_IsReshape,
|
||||
R10BIO_Degraded,
|
||||
/* Set ReadError on bios that experience a read error
|
||||
* so that raid10d knows what to do with them.
|
||||
*/
|
||||
|
@ -314,8 +314,7 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
|
||||
set_bit(R5_UPTODATE, &sh->dev[i].flags);
|
||||
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
|
||||
conf->mddev->bitmap_ops->endwrite(conf->mddev,
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf),
|
||||
!test_bit(STRIPE_DEGRADED, &sh->state));
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1345,8 +1345,6 @@ again:
|
||||
submit_bio_noacct(rbi);
|
||||
}
|
||||
if (!rdev && !rrdev) {
|
||||
if (op_is_write(op))
|
||||
set_bit(STRIPE_DEGRADED, &sh->state);
|
||||
pr_debug("skip op %d on disc %d for sector %llu\n",
|
||||
bi->bi_opf, i, (unsigned long long)sh->sector);
|
||||
clear_bit(R5_LOCKED, &sh->dev[i].flags);
|
||||
@ -2884,7 +2882,6 @@ static void raid5_end_write_request(struct bio *bi)
|
||||
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
|
||||
} else {
|
||||
if (bi->bi_status) {
|
||||
set_bit(STRIPE_DEGRADED, &sh->state);
|
||||
set_bit(WriteErrorSeen, &rdev->flags);
|
||||
set_bit(R5_WriteError, &sh->dev[i].flags);
|
||||
if (!test_and_set_bit(WantReplacement, &rdev->flags))
|
||||
@ -3664,8 +3661,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
|
||||
}
|
||||
if (bitmap_end)
|
||||
conf->mddev->bitmap_ops->endwrite(conf->mddev,
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf),
|
||||
false);
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf));
|
||||
bitmap_end = 0;
|
||||
/* and fail all 'written' */
|
||||
bi = sh->dev[i].written;
|
||||
@ -3711,8 +3707,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
|
||||
}
|
||||
if (bitmap_end)
|
||||
conf->mddev->bitmap_ops->endwrite(conf->mddev,
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf),
|
||||
false);
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf));
|
||||
/* If we were in the middle of a write the parity block might
|
||||
* still be locked - so just clear all R5_LOCKED flags
|
||||
*/
|
||||
@ -4062,8 +4057,7 @@ returnbi:
|
||||
wbi = wbi2;
|
||||
}
|
||||
conf->mddev->bitmap_ops->endwrite(conf->mddev,
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf),
|
||||
!test_bit(STRIPE_DEGRADED, &sh->state));
|
||||
sh->sector, RAID5_STRIPE_SECTORS(conf));
|
||||
if (head_sh->batch_head) {
|
||||
sh = list_first_entry(&sh->batch_list,
|
||||
struct stripe_head,
|
||||
@ -4340,7 +4334,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
|
||||
s->locked++;
|
||||
set_bit(R5_Wantwrite, &dev->flags);
|
||||
|
||||
clear_bit(STRIPE_DEGRADED, &sh->state);
|
||||
set_bit(STRIPE_INSYNC, &sh->state);
|
||||
break;
|
||||
case check_state_run:
|
||||
@ -4497,7 +4490,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
|
||||
clear_bit(R5_Wantwrite, &dev->flags);
|
||||
s->locked--;
|
||||
}
|
||||
clear_bit(STRIPE_DEGRADED, &sh->state);
|
||||
|
||||
set_bit(STRIPE_INSYNC, &sh->state);
|
||||
break;
|
||||
@ -4899,7 +4891,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
|
||||
|
||||
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
|
||||
(1 << STRIPE_PREREAD_ACTIVE) |
|
||||
(1 << STRIPE_DEGRADED) |
|
||||
(1 << STRIPE_ON_UNPLUG_LIST)),
|
||||
head_sh->state & (1 << STRIPE_INSYNC));
|
||||
|
||||
|
@ -358,7 +358,6 @@ enum {
|
||||
STRIPE_REPLACED,
|
||||
STRIPE_PREREAD_ACTIVE,
|
||||
STRIPE_DELAYED,
|
||||
STRIPE_DEGRADED,
|
||||
STRIPE_BIT_DELAY,
|
||||
STRIPE_EXPANDING,
|
||||
STRIPE_EXPAND_SOURCE,
|
||||
|
Loading…
x
Reference in New Issue
Block a user