
Commit aa8e4e0

Optimize RAIDZ expansion
- Instead of copying one ashift-sized block per ZIO, copy as much
contiguous data as is available, up to 16MB per old vdev. To avoid
data moves, use gang ABDs, so that read ZIOs can directly fill the
buffers for write ZIOs. ABDs have much smaller overhead than ZIOs in
both memory usage and processing time, and big I/Os do not depend on
I/O aggregation and scheduling to reach decent performance on HDDs.
- Use a 32-bit range tree when possible (practically always now) to
slightly reduce memory usage.
- Use ZIO_PRIORITY_REMOVAL for the early stages of expansion, same as
for the main ones.
- Fix rate overflows in `zpool status` reporting.

With these changes, expanding RAIDZ1 from 4 to 5 children, I am able
to reach a ~6GB/s rate on SSDs and ~500MB/s on HDDs, both limited by
the devices rather than the CPU.

Signed-off-by: Alexander Motin <[email protected]>
Sponsored by: iXsystems, Inc.
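The core of the change is block-to-child arithmetic: logical block b lives on child b % children at byte offset (b / children) << ashift, in both the old and the new layout. One read ZIO per old child can therefore carry every old_children-th block of a batch, landing directly in the write buffers via gang ABDs. A standalone sketch of that index math (plain C with illustrative values, not the ZFS code):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t blkid = 1000;		/* first block of the batch */
	unsigned blocks = 8;		/* batch length in blocks */
	unsigned old_children = 4;	/* children before expansion */
	unsigned new_children = 5;	/* children after expansion */
	unsigned ashift = 12;		/* 4KiB allocation size */

	for (unsigned b = 0; b < blocks; b++) {
		uint64_t blk = blkid + b;
		printf("block %5llu: old child %llu @ 0x%llx -> "
		    "new child %llu @ 0x%llx\n",
		    (unsigned long long)blk,
		    (unsigned long long)(blk % old_children),
		    (unsigned long long)((blk / old_children) << ashift),
		    (unsigned long long)(blk % new_children),
		    (unsigned long long)((blk / new_children) << ashift));
	}
	return (0);
}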
1 parent 0ffa6f3 commit aa8e4e0

File tree

2 files changed: +117 −61 lines

cmd/zpool/zpool_main.c

Lines changed: 2 additions & 4 deletions
@@ -10034,9 +10034,8 @@ print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
 		(void) printf(gettext("Removal of %s canceled on %s"),
 		    vdev_name, ctime(&end));
 	} else {
-		uint64_t copied, total, elapsed, mins_left, hours_left;
+		uint64_t copied, total, elapsed, rate, mins_left, hours_left;
 		double fraction_done;
-		uint_t rate;
 
 		assert(prs->prs_state == DSS_SCANNING);
 
@@ -10132,9 +10131,8 @@ print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
 		    copied_buf, time_buf, ctime((time_t *)&end));
 	} else {
 		char examined_buf[7], total_buf[7], rate_buf[7];
-		uint64_t copied, total, elapsed, secs_left;
+		uint64_t copied, total, elapsed, rate, secs_left;
 		double fraction_done;
-		uint_t rate;
 
 		assert(pres->pres_state == DSS_SCANNING);
 
module/zfs/vdev_raidz.c

Lines changed: 115 additions & 57 deletions
@@ -3817,16 +3817,21 @@ raidz_reflow_complete_sync(void *arg, dmu_tx_t *tx)
 }
 
 /*
- * Struct for one copy zio.
+ * State of one copy batch.
  */
 typedef struct raidz_reflow_arg {
-	vdev_raidz_expand_t *rra_vre;
-	zfs_locked_range_t *rra_lr;
-	uint64_t rra_txg;
+	vdev_raidz_expand_t *rra_vre;	/* Global expansion state. */
+	zfs_locked_range_t *rra_lr;	/* Range lock of this batch. */
+	uint64_t rra_txg;		/* TXG of this batch. */
+	uint_t rra_ashift;		/* Ashift of the vdev. */
+	uint32_t rra_tbd;		/* Number of in-flight ZIOs. */
+	uint32_t rra_writes;		/* Number of write ZIOs. */
+	zio_t *rra_zio[];		/* Write ZIO pointers. */
 } raidz_reflow_arg_t;
 
 /*
- * The write of the new location is done.
+ * Write of the new location on one child is done. Once all of them are
+ * done we can unlock and free everything.
  */
 static void
 raidz_reflow_write_done(zio_t *zio)
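rra_zio[] is a C99 flexible array member, so one allocation holds the header plus the batch's write ZIO pointers, and every kmem_free() must pass the same computed size. A minimal userland sketch of the pattern (illustrative names, not the kernel API):

#include <stdlib.h>

typedef struct zio zio_t;	/* opaque stand-in */

typedef struct batch_arg {
	unsigned ba_writes;	/* number of trailing ZIO pointers */
	zio_t *ba_zio[];	/* flexible array member */
} batch_arg_t;

static batch_arg_t *
batch_alloc(unsigned writes)
{
	/* One allocation covers the header and all the pointers. */
	batch_arg_t *ba = calloc(1,
	    sizeof (*ba) + sizeof (zio_t *) * writes);

	if (ba != NULL)
		ba->ba_writes = writes;
	return (ba);
}

static void
batch_free(batch_arg_t *ba)
{
	/*
	 * kmem_free() requires the original size, hence
	 * sizeof (*rra) + sizeof (zio_t *) * rra->rra_writes in the
	 * diff; libc free() does not.
	 */
	free(ba);
}

int
main(void)
{
	batch_free(batch_alloc(5));
	return (0);
}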
@@ -3850,24 +3855,30 @@ raidz_reflow_write_done(zio_t *zio)
 		    zio->io_size;
 	}
 	cv_signal(&vre->vre_cv);
+	boolean_t done = (--rra->rra_tbd == 0);
 	mutex_exit(&vre->vre_lock);
 
-	zfs_rangelock_exit(rra->rra_lr);
-
-	kmem_free(rra, sizeof (*rra));
+	if (!done)
+		return;
 	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
+	zfs_rangelock_exit(rra->rra_lr);
+	kmem_free(rra, sizeof (*rra) + sizeof (zio_t *) * rra->rra_writes);
 }
 
 /*
- * The read of the old location is done. The parent zio is the write to
- * the new location. Allow it to start.
+ * Read of the old location on one child is done. Once all of them are
+ * done, the writes have all the data and we can issue them.
  */
 static void
 raidz_reflow_read_done(zio_t *zio)
 {
 	raidz_reflow_arg_t *rra = zio->io_private;
 	vdev_raidz_expand_t *vre = rra->rra_vre;
 
+	/* Reads of one block borrow the write ABDs; free bigger gang ABDs. */
+	if (zio->io_size > (1 << rra->rra_ashift))
+		abd_free(zio->io_abd);
+
 	/*
 	 * If the read failed, or if it was done on a vdev that is not fully
 	 * healthy (e.g. a child that has a resilver in progress), we may not
@@ -3891,7 +3902,11 @@ raidz_reflow_read_done(zio_t *zio)
 		mutex_exit(&vre->vre_lock);
 	}
 
-	zio_nowait(zio_unique_parent(zio));
+	if (atomic_dec_32_nv(&rra->rra_tbd) > 0)
+		return;
+	rra->rra_tbd = rra->rra_writes;
+	for (uint64_t i = 0; i < rra->rra_writes; i++)
+		zio_nowait(rra->rra_zio[i]);
 }
 
 static void
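Each read completion decrements rra_tbd; only the completion that drives it to zero issues the queued writes, so the writes start exactly once, after all reads have filled their buffers. A self-contained sketch of that countdown pattern using C11 atomics (the kernel's atomic_dec_32_nv() returns the new value; this is illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define	NREADS	4

static atomic_uint tbd = NREADS;

/* Called once per read completion, possibly from many threads. */
static void
read_done(unsigned id)
{
	/*
	 * atomic_fetch_sub() returns the value before the decrement,
	 * so 1 means this caller took the counter to zero and is the
	 * last reader; everyone else returns early.
	 */
	if (atomic_fetch_sub(&tbd, 1) > 1)
		return;
	printf("read %u finished last: issue all write ZIOs now\n", id);
}

int
main(void)
{
	for (unsigned i = 0; i < NREADS; i++)
		read_done(i);
	return (0);
}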
@@ -3932,21 +3947,19 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
     dmu_tx_t *tx)
 {
 	spa_t *spa = vd->vdev_spa;
-	int ashift = vd->vdev_top->vdev_ashift;
-	uint64_t offset, size;
+	uint_t ashift = vd->vdev_top->vdev_ashift;
 
-	if (!range_tree_find_in(rt, 0, vd->vdev_top->vdev_asize,
-	    &offset, &size)) {
+	range_seg_t *rs = range_tree_first(rt);
+	if (rs == NULL)
 		return (B_FALSE);
-	}
+	uint64_t offset = rs_get_start(rs, rt);
 	ASSERT(IS_P2ALIGNED(offset, 1 << ashift));
+	uint64_t size = rs_get_end(rs, rt) - offset;
 	ASSERT3U(size, >=, 1 << ashift);
-	uint64_t length = 1 << ashift;
-	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
+	ASSERT(IS_P2ALIGNED(size, 1 << ashift));
 
 	uint64_t blkid = offset >> ashift;
-
-	int old_children = vd->vdev_children - 1;
+	uint_t old_children = vd->vdev_children - 1;
 
 	/*
 	 * We can only progress to the point that writes will not overlap
@@ -3965,26 +3978,34 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
 	uint64_t next_overwrite_blkid = ubsync_blkid +
 	    ubsync_blkid / old_children - old_children;
 	VERIFY3U(next_overwrite_blkid, >, ubsync_blkid);
-
 	if (blkid >= next_overwrite_blkid) {
 		raidz_reflow_record_progress(vre,
 		    next_overwrite_blkid << ashift, tx);
 		return (B_TRUE);
 	}
 
-	range_tree_remove(rt, offset, length);
+	size = MIN(size, raidz_expand_max_copy_bytes);
+	size = MIN(size, (uint64_t)old_children *
+	    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE));
+	size = MAX(size, 1 << ashift);
+	uint_t blocks = MIN(size >> ashift, next_overwrite_blkid - blkid);
+	size = (uint64_t)blocks << ashift;
 
-	raidz_reflow_arg_t *rra = kmem_zalloc(sizeof (*rra), KM_SLEEP);
+	range_tree_remove(rt, offset, size);
+
+	uint_t reads = MIN(blocks, old_children);
+	uint_t writes = MIN(blocks, vd->vdev_children);
+	raidz_reflow_arg_t *rra = kmem_zalloc(sizeof (*rra) +
+	    sizeof (zio_t *) * writes, KM_SLEEP);
 	rra->rra_vre = vre;
 	rra->rra_lr = zfs_rangelock_enter(&vre->vre_rangelock,
-	    offset, length, RL_WRITER);
+	    offset, size, RL_WRITER);
 	rra->rra_txg = dmu_tx_get_txg(tx);
+	rra->rra_ashift = ashift;
+	rra->rra_tbd = reads;
+	rra->rra_writes = writes;
 
-	raidz_reflow_record_progress(vre, offset + length, tx);
-
-	mutex_enter(&vre->vre_lock);
-	vre->vre_outstanding_bytes += length;
-	mutex_exit(&vre->vre_lock);
+	raidz_reflow_record_progress(vre, offset + size, tx);
 
 	/*
 	 * SCL_STATE will be released when the read and write are done,
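The clamp sequence above sizes one batch: take the contiguous free run, cap it by the copy-bytes tunable and by old_children times the maximum record size, round up to at least one block, then limit the block count so writes stay below next_overwrite_blkid. A standalone walk-through of that arithmetic with illustrative constants (the real values are module tunables not shown in this diff):

#include <stdio.h>
#include <stdint.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	uint64_t size = 100ULL << 20;		/* contiguous free run */
	uint64_t max_copy_bytes = 16ULL << 20;	/* per-batch copy cap */
	uint64_t max_recordsize = 1ULL << 20;	/* record size cap */
	uint64_t old_children = 4;
	unsigned ashift = 12;
	uint64_t blkid = 1000;
	uint64_t next_overwrite_blkid = 100000;

	size = MIN(size, max_copy_bytes);
	size = MIN(size, old_children * max_recordsize);
	size = MAX(size, 1ULL << ashift);
	uint64_t blocks = MIN(size >> ashift,
	    next_overwrite_blkid - blkid);
	size = blocks << ashift;

	printf("batch: %llu blocks, %llu bytes\n",
	    (unsigned long long)blocks, (unsigned long long)size);
	return (0);
}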
@@ -4006,29 +4027,61 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
 	mutex_exit(&vre->vre_lock);
 
 	/* drop everything we acquired */
-	zfs_rangelock_exit(rra->rra_lr);
-	kmem_free(rra, sizeof (*rra));
 	spa_config_exit(spa, SCL_STATE, spa);
+	zfs_rangelock_exit(rra->rra_lr);
+	kmem_free(rra, sizeof (*rra) + sizeof (zio_t *) * writes);
 	return (B_TRUE);
 }
 
+	mutex_enter(&vre->vre_lock);
+	vre->vre_outstanding_bytes += size;
+	mutex_exit(&vre->vre_lock);
+
+	/* Allocate ABD and ZIO for each child we write. */
+	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
 	zio_t *pio = spa->spa_txg_zio[txgoff];
-	abd_t *abd = abd_alloc_for_io(length, B_FALSE);
-	zio_t *write_zio = zio_vdev_child_io(pio, NULL,
-	    vd->vdev_child[blkid % vd->vdev_children],
-	    (blkid / vd->vdev_children) << ashift,
-	    abd, length,
-	    ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
-	    ZIO_FLAG_CANFAIL,
-	    raidz_reflow_write_done, rra);
-
-	zio_nowait(zio_vdev_child_io(write_zio, NULL,
-	    vd->vdev_child[blkid % old_children],
-	    (blkid / old_children) << ashift,
-	    abd, length,
-	    ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
-	    ZIO_FLAG_CANFAIL,
-	    raidz_reflow_read_done, rra));
+	uint_t b = blocks / vd->vdev_children;
+	uint_t bb = blocks % vd->vdev_children;
+	for (uint_t i = 0; i < writes; i++) {
+		uint_t n = b + (i < bb);
+		abd_t *abd = abd_alloc_for_io(n << ashift, B_FALSE);
+		rra->rra_zio[i] = zio_vdev_child_io(pio, NULL,
+		    vd->vdev_child[(blkid + i) % vd->vdev_children],
+		    ((blkid + i) / vd->vdev_children) << ashift,
+		    abd, n << ashift, ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
+		    ZIO_FLAG_CANFAIL, raidz_reflow_write_done, rra);
+	}
+
+	/*
+	 * Allocate and issue a ZIO for each child we read. Reads of only
+	 * one block can use the respective write ABDs, since those also
+	 * have only one block. For bigger reads create gang ABDs and fill
+	 * them with the respective blocks from the write ABDs.
+	 */
+	b = blocks / old_children;
+	bb = blocks % old_children;
+	for (uint_t i = 0; i < reads; i++) {
+		uint_t n = b + (i < bb);
+		abd_t *abd;
+		if (n > 1) {
+			abd = abd_alloc_gang();
+			for (uint_t j = 0; j < n; j++) {
+				uint_t b = j * old_children + i;
+				abd_t *cabd = abd_get_offset_size(
+				    rra->rra_zio[b % vd->vdev_children]->io_abd,
+				    (b / vd->vdev_children) << ashift,
+				    1 << ashift);
+				abd_gang_add(abd, cabd, B_TRUE);
+			}
+		} else {
+			abd = rra->rra_zio[i]->io_abd;
+		}
+		zio_nowait(zio_vdev_child_io(pio, NULL,
+		    vd->vdev_child[(blkid + i) % old_children],
+		    ((blkid + i) / old_children) << ashift, abd,
+		    n << ashift, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
+		    ZIO_FLAG_CANFAIL, raidz_reflow_read_done, rra));
+	}
 
 	return (B_FALSE);
 }
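In the read loop, block j of old child i is logical block b = j * old_children + i of the batch; its bytes belong in write buffer b % new_children at slot b / new_children, which is exactly the slice abd_get_offset_size() carves out and abd_gang_add() chains into the read's gang ABD. A standalone simulation of that mapping (batch-relative block ids, plain printf instead of ABDs):

#include <stdio.h>

#define	OLD	4	/* children before expansion */
#define	NEW	5	/* children after expansion */
#define	BLOCKS	10	/* blocks in this batch (blkid 0 for brevity) */

int
main(void)
{
	/*
	 * For each old child, print which write buffer and block slot
	 * each of its blocks maps to -- the slices a gang ABD chains
	 * together so one read fills the write buffers in place.
	 */
	for (unsigned i = 0; i < OLD && i < BLOCKS; i++) {
		printf("read old child %u:", i);
		for (unsigned j = 0; j * OLD + i < BLOCKS; j++) {
			unsigned b = j * OLD + i;
			printf(" [wbuf %u slot %u]", b % NEW, b / NEW);
		}
		printf("\n");
	}
	return (0);
}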
@@ -4122,7 +4175,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 		zio_nowait(zio_vdev_child_io(pio, NULL,
 		    raidvd->vdev_child[i],
 		    VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
-		    write_size, ZIO_TYPE_READ, ZIO_PRIORITY_ASYNC_READ,
+		    write_size, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
 		    ZIO_FLAG_CANFAIL, raidz_scratch_child_done, pio));
 	}
 	error = zio_wait(pio);
@@ -4142,7 +4195,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 		ASSERT0(vdev_is_dead(raidvd->vdev_child[i]));
 		zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
 		    0, abds[i], read_size, ZIO_TYPE_READ,
-		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
+		    ZIO_PRIORITY_REMOVAL, ZIO_FLAG_CANFAIL,
 		    raidz_scratch_child_done, pio));
 	}
 	error = zio_wait(pio);
@@ -4197,7 +4250,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 		 */
 		zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
 		    VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
-		    write_size, ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
+		    write_size, ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
 		    ZIO_FLAG_CANFAIL, raidz_scratch_child_done, pio));
 	}
 	error = zio_wait(pio);
@@ -4246,7 +4299,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 	for (int i = 0; i < raidvd->vdev_children; i++) {
 		zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
 		    0, abds[i], write_size, ZIO_TYPE_WRITE,
-		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL,
+		    ZIO_PRIORITY_REMOVAL, ZIO_FLAG_CANFAIL,
 		    raidz_scratch_child_done, pio));
 	}
 	error = zio_wait(pio);
@@ -4355,8 +4408,7 @@ vdev_raidz_reflow_copy_scratch(spa_t *spa)
 		 */
 		zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
 		    VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
-		    write_size, ZIO_TYPE_READ,
-		    ZIO_PRIORITY_ASYNC_READ, 0,
+		    write_size, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL, 0,
 		    raidz_scratch_child_done, pio));
 	}
 	zio_wait(pio);
@@ -4368,7 +4420,7 @@ vdev_raidz_reflow_copy_scratch(spa_t *spa)
 	for (int i = 0; i < raidvd->vdev_children; i++) {
 		zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
 		    0, abds[i], write_size, ZIO_TYPE_WRITE,
-		    ZIO_PRIORITY_ASYNC_WRITE, 0,
+		    ZIO_PRIORITY_REMOVAL, 0,
 		    raidz_scratch_child_done, pio));
 	}
 	zio_wait(pio);
@@ -4490,8 +4542,11 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
 		 * space. Note that there may be a little bit more free
 		 * space (e.g. in ms_defer), and it's fine to copy that too.
 		 */
-		range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64,
-		    NULL, 0, 0);
+		uint64_t shift, start;
+		range_seg_type_t type = metaslab_calculate_range_tree_type(
+		    raidvd, msp, &start, &shift);
+		range_tree_t *rt = range_tree_create(NULL, type, NULL,
+		    start, shift);
 		range_tree_add(rt, msp->ms_start, msp->ms_size);
 		range_tree_walk(msp->ms_allocatable, range_tree_remove, rt);
 		mutex_exit(&msp->ms_lock);
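metaslab_calculate_range_tree_type() lets the expansion thread use 32-bit range segments whenever metaslab offsets, taken relative to the metaslab start and shifted by the vdev ashift, fit in 32 bits; previously RANGE_SEG64 was used unconditionally. A minimal sketch of the fit test this implies (an assumption about the selection logic, not the ZFS function itself):

#include <stdio.h>
#include <stdint.h>

/*
 * Can every offset in a metaslab, taken relative to its start and
 * shifted by ashift, be stored in 32 bits?
 */
static int
fits_32bit(uint64_t ms_size, unsigned shift)
{
	return (((ms_size - 1) >> shift) <= UINT32_MAX);
}

int
main(void)
{
	uint64_t ms_size = 16ULL << 30;	/* 16GiB metaslab */
	unsigned ashift = 12;		/* 4KiB allocation size */

	printf("32-bit segments usable: %s\n",
	    fits_32bit(ms_size, ashift) ? "yes" : "no");
	return (0);
}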
@@ -4516,7 +4571,10 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
 		 * when importing a pool with an expansion in progress),
 		 * discard any state that we have already processed.
 		 */
-		range_tree_clear(rt, 0, vre->vre_offset);
+		if (vre->vre_offset > msp->ms_start) {
+			range_tree_clear(rt, msp->ms_start,
+			    vre->vre_offset - msp->ms_start);
+		}
 
 		while (!zthr_iscancelled(zthr) &&
 		    !range_tree_is_empty(rt) &&
