@@ -1728,7 +1728,7 @@ arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
  */
 static arc_buf_hdr_t *
 arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
-    dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
+    dva_t dva, uint64_t daddr, int32_t psize, uint64_t asize, uint64_t birth,
     enum zio_compress compress, uint8_t complevel, boolean_t protected,
     boolean_t prefetch, arc_state_type_t arcs_state)
 {
@@ -1744,7 +1744,7 @@ arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
 	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
 	HDR_SET_LSIZE(hdr, size);
 	HDR_SET_PSIZE(hdr, psize);
-	HDR_SET_ASIZE(hdr, vdev_psize_to_asize(dev->l2ad_vdev, psize));
+	HDR_SET_L2SIZE(hdr, asize);
 	arc_hdr_set_compress(hdr, compress);
 	hdr->b_complevel = complevel;
 	if (protected)
@@ -3534,13 +3534,13 @@ l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
 {
 	uint64_t lsize = HDR_GET_LSIZE(hdr);
 	uint64_t psize = HDR_GET_PSIZE(hdr);
-	uint64_t asize = HDR_GET_ASIZE(hdr);
+	uint64_t asize = HDR_GET_L2SIZE(hdr);
 	arc_buf_contents_t type = hdr->b_type;
 	int64_t lsize_s;
 	int64_t psize_s;
 	int64_t asize_s;
 
-	/* For L2 we expect the header's b_asize to be valid */
+	/* For L2 we expect the header's b_l2size to be valid */
 	ASSERT3U(asize, >=, psize);
 
 	if (incr) {
@@ -3612,7 +3612,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
 
 	l2arc_hdr_arcstats_decrement(hdr);
 	if (dev->l2ad_vdev != NULL) {
-		uint64_t asize = HDR_GET_ASIZE(hdr);
+		uint64_t asize = HDR_GET_L2SIZE(hdr);
 		vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
 	}
 
@@ -4501,15 +4501,8 @@ arc_flush_async(spa_t *spa)
 	uint64_t spa_guid = spa_load_guid(spa);
 	arc_async_flush_t *af = arc_async_flush_add(spa_guid, 1);
 
-	/*
-	 * Note that arc_flush_task() needs arc_async_flush_lock to remove af
-	 * list node. So by holding the lock we avoid a race for af removal
-	 * with our use here.
-	 */
-	mutex_enter(&arc_async_flush_lock);
 	taskq_dispatch_ent(arc_flush_taskq, arc_flush_task,
 	    af, TQ_SLEEP, &af->af_tqent);
-	mutex_exit(&arc_async_flush_lock);
 }
 
 /*
@@ -7876,8 +7869,8 @@ arc_init(void)
 	list_create(&arc_async_flush_list, sizeof (arc_async_flush_t),
 	    offsetof(arc_async_flush_t, af_node));
 	mutex_init(&arc_async_flush_lock, NULL, MUTEX_DEFAULT, NULL);
-	arc_flush_taskq = taskq_create("arc_flush", 75, defclsyspri,
-	    1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
+	arc_flush_taskq = taskq_create("arc_flush", MIN(boot_ncpus, 4),
+	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
 
 	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
 	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
@@ -9442,7 +9435,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 			hdr->b_l2hdr.b_arcs_state =
 			    hdr->b_l1hdr.b_state->arcs_state;
 			/* l2arc_hdr_arcstats_update() expects a valid asize */
-			HDR_SET_ASIZE(hdr, asize);
+			HDR_SET_L2SIZE(hdr, asize);
 			arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR |
 			    ARC_FLAG_L2_WRITING);
 
@@ -9609,10 +9602,8 @@ l2arc_feed_thread(void *unused)
 		 * held to prevent device removal. l2arc_dev_get_next()
 		 * will grab and release l2arc_dev_mtx.
 		 */
-		if ((dev = l2arc_dev_get_next()) == NULL ||
-		    dev->l2ad_spa == NULL) {
+		if ((dev = l2arc_dev_get_next()) == NULL)
 			continue;
-		}
 
 		spa = dev->l2ad_spa;
 		ASSERT3P(spa, !=, NULL);
@@ -10573,7 +10564,8 @@ l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
 	arc_buf_hdr_t *hdr, *exists;
 	kmutex_t *hash_lock;
 	arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
-	uint64_t asize;
+	uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
+	    L2BLK_GET_PSIZE((le)->le_prop));
 
 	/*
 	 * Do all the allocation before grabbing any locks, this lets us
@@ -10582,14 +10574,11 @@ l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
 	 */
 	hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
 	    dev, le->le_dva, le->le_daddr,
-	    L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
+	    L2BLK_GET_PSIZE((le)->le_prop), asize, le->le_birth,
 	    L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
 	    L2BLK_GET_PROTECTED((le)->le_prop),
 	    L2BLK_GET_PREFETCH((le)->le_prop),
 	    L2BLK_GET_STATE((le)->le_prop));
-	asize = vdev_psize_to_asize(dev->l2ad_vdev,
-	    L2BLK_GET_PSIZE((le)->le_prop));
-	ASSERT3U(asize, ==, HDR_GET_ASIZE(hdr));
 
 	/*
 	 * vdev_space_update() has to be called before arc_hdr_destroy() to
@@ -10620,7 +10609,7 @@ l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
 		exists->b_l2hdr.b_arcs_state =
 		    L2BLK_GET_STATE((le)->le_prop);
 		/* l2arc_hdr_arcstats_update() expects a valid asize */
-		HDR_SET_ASIZE(exists, asize);
+		HDR_SET_L2SIZE(exists, asize);
 		mutex_enter(&dev->l2ad_mtx);
 		list_insert_tail(&dev->l2ad_buflist, exists);
 		(void) zfs_refcount_add_many(&dev->l2ad_alloc,