Skip to content

Commit 21d08be

Browse files
committed
Reduce latency effects of non-interactive I/O.
Investigating the influence of scrub (especially sequential) on random read latency I've noticed that on some HDDs a single 4KB read may take up to 4 seconds! Deeper investigation showed that many HDDs heavily prioritize sequential reads even when those are submitted with a queue depth of 1. This patch addresses the latency from two sides: - by using _min_active queue depths for non-interactive requests while the interactive request(s) are active and for a few requests after; - by throttling it further if no interactive requests have completed while the configured number of non-interactive ones did. While there, I've also modified vdev_queue_class_to_issue() to give more chances to schedule at least _min_active requests to the lowest priorities. It should reduce starvation if several non-interactive processes are running at the same time as some interactive ones, and I think it should make it possible to set zfs_vdev_max_active as low as 1. I've benchmarked this change with 4KB random reads from a ZVOL with 16KB block size on a newly written non-fragmented pool. On a fragmented pool I also saw improvements, but not so dramatic. Below are log2 histograms of the random read latency in milliseconds for different devices: 4 2x mirror vdevs of SATA HDD WDC WD20EFRX-68EUZN0 before: 0, 0, 2, 1, 12, 21, 19, 18, 10, 15, 17, 21 after: 0, 0, 0, 24, 101, 195, 419, 250, 47, 4, 0, 0 , that means maximum latency reduction from 2s to 500ms. 4 2x mirror vdevs of SATA HDD WDC WD80EFZX-68UW8N0 before: 0, 0, 2, 31, 38, 28, 18, 12, 17, 20, 24, 10, 3 after: 0, 0, 55, 247, 455, 470, 412, 181, 36, 0, 0, 0, 0 , i.e. from 4s to 250ms. 1 SAS HDD SEAGATE ST14000NM0048 before: 0, 0, 29, 70, 107, 45, 27, 1, 0, 0, 1, 4, 19 after: 1, 29, 681, 1261, 676, 1633, 67, 1, 0, 0, 0, 0, 0 , i.e. from 4s to 125ms. 1 SAS SSD SEAGATE XS3840TE70014 before (microseconds): 0, 0, 0, 0, 0, 0, 0, 0, 70, 18343, 82548, 618 after: 0, 0, 0, 0, 0, 0, 0, 0, 283, 92351, 34844, 90 I've also measured scrub time during the test and on idle pools. 
On an idle fragmented pool I've measured scrub getting a few percent faster due to the use of QD3 instead of QD2 before. On an idle non-fragmented pool I've measured no difference. On a busy non-fragmented pool I've measured a scrub time increase of about 1.5-1.7x, while the IOPS increase reached 5-9x. Signed-off-by: Alexander Motin <[email protected]> Sponsored-By: iXsystems, Inc.
1 parent 52e585a commit 21d08be

File tree

4 files changed

+113
-20
lines changed

4 files changed

+113
-20
lines changed

include/sys/vdev_impl.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,9 @@ struct vdev_queue {
148148
avl_tree_t vq_write_offset_tree;
149149
avl_tree_t vq_trim_offset_tree;
150150
uint64_t vq_last_offset;
151+
zio_priority_t vq_last_prio; /* Last sent I/O priority. */
152+
int32_t vq_ia_active; /* Active interactive I/Os. */
153+
int32_t vq_nia_credit; /* Non-interactive I/Os credit. */
151154
hrtime_t vq_io_complete_ts; /* time last i/o completed */
152155
hrtime_t vq_io_delta_ts;
153156
zio_t vq_io_search; /* used as local for stack reduction */

include/sys/zio_priority.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,15 +27,17 @@ typedef enum zio_priority {
2727
ZIO_PRIORITY_SYNC_WRITE, /* ZIL */
2828
ZIO_PRIORITY_ASYNC_READ, /* prefetch */
2929
ZIO_PRIORITY_ASYNC_WRITE, /* spa_sync() */
30+
ZIO_PRIORITY_TRIM, /* trim I/O (discard) */
3031
ZIO_PRIORITY_SCRUB, /* asynchronous scrub/resilver reads */
3132
ZIO_PRIORITY_REMOVAL, /* reads/writes for vdev removal */
3233
ZIO_PRIORITY_INITIALIZING, /* initializing I/O */
33-
ZIO_PRIORITY_TRIM, /* trim I/O (discard) */
3434
ZIO_PRIORITY_REBUILD, /* reads/writes for vdev rebuild */
3535
ZIO_PRIORITY_NUM_QUEUEABLE,
3636
ZIO_PRIORITY_NOW, /* non-queued i/os (e.g. free) */
3737
} zio_priority_t;
3838

39+
#define ZIO_PRIORITY_MAX_INTERACTIVE ZIO_PRIORITY_TRIM
40+
3941
#ifdef __cplusplus
4042
}
4143
#endif

man/man5/zfs-module-parameters.5

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2165,6 +2165,38 @@ See the section "ZFS I/O SCHEDULER".
21652165
Default value: \fB1\fR.
21662166
.RE
21672167

2168+
.sp
2169+
.ne 2
2170+
.na
2171+
\fBzfs_vdev_nia_delay\fR (int)
2172+
.ad
2173+
.RS 12n
2174+
To reduce effects of non-interactive I/O on interactive I/O latency
2175+
the former are limited to *_min_active while the latter are active,
2176+
plus for at least this number of I/Os afterwards, in case interactive I/Os return.
2177+
See the section "ZFS I/O SCHEDULER".
2178+
.sp
2179+
Default value: \fB5\fR.
2180+
.RE
2181+
2182+
.sp
2183+
.ne 2
2184+
.na
2185+
\fBzfs_vdev_nia_credit\fR (int)
2186+
.ad
2187+
.RS 12n
2188+
Some HDDs tend to prioritize sequential I/O so high, that concurrent
2189+
random I/O latency reaches several seconds. On some HDDs it happens
2190+
even if sequential I/Os are submitted one at a time, and so setting
2191+
*_max_active to 1 does not help. To handle this in case of scrub
2192+
and other non-interactive I/O this tunable limits the number of their
2193+
I/Os that can be sent until at least one interactive I/O completes
2194+
without the enforced wait, making the HDD to stop the spree.
2195+
See the section "ZFS I/O SCHEDULER".
2196+
.sp
2197+
Default value: \fB5\fR.
2198+
.RE
2199+
21682200
.sp
21692201
.ne 2
21702202
.na

module/zfs/vdev_queue.c

Lines changed: 75 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@ uint32_t zfs_vdev_async_read_max_active = 3;
151151
uint32_t zfs_vdev_async_write_min_active = 2;
152152
uint32_t zfs_vdev_async_write_max_active = 10;
153153
uint32_t zfs_vdev_scrub_min_active = 1;
154-
uint32_t zfs_vdev_scrub_max_active = 2;
154+
uint32_t zfs_vdev_scrub_max_active = 3;
155155
uint32_t zfs_vdev_removal_min_active = 1;
156156
uint32_t zfs_vdev_removal_max_active = 2;
157157
uint32_t zfs_vdev_initializing_min_active = 1;
@@ -171,6 +171,24 @@ uint32_t zfs_vdev_rebuild_max_active = 3;
171171
int zfs_vdev_async_write_active_min_dirty_percent = 30;
172172
int zfs_vdev_async_write_active_max_dirty_percent = 60;
173173

174+
/*
175+
* To reduce effects of non-interactive I/O on interactive I/O latency
176+
* the first are limited to *_min_active while there are second active,
177+
* plus at least this number of I/Os after in case interactive return.
178+
*/
179+
int zfs_vdev_nia_delay = 5;
180+
181+
/*
182+
* Some HDDs tend to prioritize sequential I/O so high, that concurrent
183+
* random I/O latency reaches several seconds. On some HDDs it happens
184+
* even if sequential I/Os are submitted one at a time, and so setting
185+
* *_max_active to 1 does not help. To handle this in case of scrub
186+
* and other non-interactive I/O this tunable limits the number of their
187+
* I/Os that can be sent until at least one interactive I/O completes
188+
* without the enforced wait, making the HDD stop the spree.
189+
*/
190+
int zfs_vdev_nia_credit = 5;
191+
174192
/*
175193
* To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
176194
* For read I/Os, we also aggregate across small adjacency gaps; for writes
@@ -261,7 +279,7 @@ vdev_queue_timestamp_compare(const void *x1, const void *x2)
261279
}
262280

263281
static int
264-
vdev_queue_class_min_active(zio_priority_t p)
282+
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
265283
{
266284
switch (p) {
267285
case ZIO_PRIORITY_SYNC_READ:
@@ -272,16 +290,22 @@ vdev_queue_class_min_active(zio_priority_t p)
272290
return (zfs_vdev_async_read_min_active);
273291
case ZIO_PRIORITY_ASYNC_WRITE:
274292
return (zfs_vdev_async_write_min_active);
293+
case ZIO_PRIORITY_TRIM:
294+
return (zfs_vdev_trim_min_active);
275295
case ZIO_PRIORITY_SCRUB:
276-
return (zfs_vdev_scrub_min_active);
296+
#define M(X) if (vq->vq_ia_active > 0) { \
297+
return (MIN(vq->vq_nia_credit, \
298+
zfs_vdev_##X##_min_active)); \
299+
} \
300+
return (zfs_vdev_##X##_min_active)
301+
M(scrub);
277302
case ZIO_PRIORITY_REMOVAL:
278-
return (zfs_vdev_removal_min_active);
303+
M(removal);
279304
case ZIO_PRIORITY_INITIALIZING:
280-
return (zfs_vdev_initializing_min_active);
281-
case ZIO_PRIORITY_TRIM:
282-
return (zfs_vdev_trim_min_active);
305+
M(initializing);
283306
case ZIO_PRIORITY_REBUILD:
284-
return (zfs_vdev_rebuild_min_active);
307+
M(rebuild);
308+
#undef M
285309
default:
286310
panic("invalid priority %u", p);
287311
return (0);
@@ -337,7 +361,7 @@ vdev_queue_max_async_writes(spa_t *spa)
337361
}
338362

339363
static int
340-
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
364+
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
341365
{
342366
switch (p) {
343367
case ZIO_PRIORITY_SYNC_READ:
@@ -348,16 +372,23 @@ vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
348372
return (zfs_vdev_async_read_max_active);
349373
case ZIO_PRIORITY_ASYNC_WRITE:
350374
return (vdev_queue_max_async_writes(spa));
375+
case ZIO_PRIORITY_TRIM:
376+
return (zfs_vdev_trim_max_active);
351377
case ZIO_PRIORITY_SCRUB:
352-
return (zfs_vdev_scrub_max_active);
378+
#define M(X) if (vq->vq_ia_active > 0) { \
379+
return (MIN(vq->vq_nia_credit, \
380+
zfs_vdev_##X##_min_active)); \
381+
} else if (vq->vq_nia_credit < zfs_vdev_nia_delay) \
382+
return (zfs_vdev_##X##_min_active); \
383+
return (zfs_vdev_##X##_max_active);
384+
M(scrub);
353385
case ZIO_PRIORITY_REMOVAL:
354-
return (zfs_vdev_removal_max_active);
386+
M(removal);
355387
case ZIO_PRIORITY_INITIALIZING:
356-
return (zfs_vdev_initializing_max_active);
357-
case ZIO_PRIORITY_TRIM:
358-
return (zfs_vdev_trim_max_active);
388+
M(initializing);
359389
case ZIO_PRIORITY_REBUILD:
360-
return (zfs_vdev_rebuild_max_active);
390+
M(rebuild);
391+
#undef M
361392
default:
362393
panic("invalid priority %u", p);
363394
return (0);
@@ -372,17 +403,22 @@ static zio_priority_t
372403
vdev_queue_class_to_issue(vdev_queue_t *vq)
373404
{
374405
spa_t *spa = vq->vq_vdev->vdev_spa;
375-
zio_priority_t p;
406+
zio_priority_t p, n;
376407

377408
if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
378409
return (ZIO_PRIORITY_NUM_QUEUEABLE);
379410

380411
/* find a queue that has not reached its minimum # outstanding i/os */
381-
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
412+
p = vq->vq_last_prio;
413+
p = (p == ZIO_PRIORITY_NUM_QUEUEABLE - 1) ? 0 : p + 1;
414+
for (n = ZIO_PRIORITY_NUM_QUEUEABLE; n > 0; n--) {
382415
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
383416
vq->vq_class[p].vqc_active <
384-
vdev_queue_class_min_active(p))
417+
vdev_queue_class_min_active(vq, p)) {
418+
vq->vq_last_prio = p;
385419
return (p);
420+
}
421+
p = (p == ZIO_PRIORITY_NUM_QUEUEABLE - 1) ? 0 : p + 1;
386422
}
387423

388424
/*
@@ -392,8 +428,10 @@ vdev_queue_class_to_issue(vdev_queue_t *vq)
392428
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
393429
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
394430
vq->vq_class[p].vqc_active <
395-
vdev_queue_class_max_active(spa, p))
431+
vdev_queue_class_max_active(spa, vq, p)) {
432+
vq->vq_last_prio = p;
396433
return (p);
434+
}
397435
}
398436

399437
/* No eligible queued i/os */
@@ -502,6 +540,11 @@ vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
502540
ASSERT(MUTEX_HELD(&vq->vq_lock));
503541
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
504542
vq->vq_class[zio->io_priority].vqc_active++;
543+
if (zio->io_priority <= ZIO_PRIORITY_MAX_INTERACTIVE) {
544+
if (vq->vq_ia_active++ == 0)
545+
vq->vq_nia_credit = 1;
546+
} else if (vq->vq_ia_active > 0)
547+
vq->vq_nia_credit--;
505548
avl_add(&vq->vq_active_tree, zio);
506549

507550
if (shk->kstat != NULL) {
@@ -520,6 +563,13 @@ vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
520563
ASSERT(MUTEX_HELD(&vq->vq_lock));
521564
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
522565
vq->vq_class[zio->io_priority].vqc_active--;
566+
if (zio->io_priority <= ZIO_PRIORITY_MAX_INTERACTIVE) {
567+
if (--vq->vq_ia_active == 0)
568+
vq->vq_nia_credit = 0;
569+
else
570+
vq->vq_nia_credit = zfs_vdev_nia_credit;
571+
} else if (vq->vq_ia_active == 0)
572+
vq->vq_nia_credit++;
523573
avl_remove(&vq->vq_active_tree, zio);
524574

525575
if (shk->kstat != NULL) {
@@ -1065,6 +1115,12 @@ ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, INT, ZMOD_RW,
10651115
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, INT, ZMOD_RW,
10661116
"Min active rebuild I/Os per vdev");
10671117

1118+
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, INT, ZMOD_RW,
1119+
"Number of non-interactive I/Os to allow in sequence");
1120+
1121+
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, INT, ZMOD_RW,
1122+
"Number of non-interactive I/Os before _max_active");
1123+
10681124
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, INT, ZMOD_RW,
10691125
"Queue depth percentage for each top-level vdev");
10701126
/* END CSTYLED */

0 commit comments

Comments
 (0)