@@ -1098,14 +1098,20 @@ static kmutex_t l2arc_feed_thr_lock;
 static kcondvar_t l2arc_feed_thr_cv;
 static uint8_t l2arc_thread_exit;
 
-static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
+enum arc_hdr_alloc_flags {
+	ARC_HDR_ALLOC_RDATA = 0x1,
+	ARC_HDR_DO_ADAPT = 0x2,
+};
+
+
+static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
-static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
+static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
 static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
 static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
 static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
-static void arc_hdr_alloc_abd(arc_buf_hdr_t *, boolean_t);
+static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
 static boolean_t arc_is_overflowing(void);
 static void arc_buf_watch(arc_buf_t *);
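The core of this hunk: arc_hdr_alloc_abd() loses its single boolean_t argument and gains an int of or-able arc_hdr_alloc_flags, so raw-data (crypto) allocation and ARC adaptation can be requested independently, while arc_get_data_abd() and arc_get_data_impl() grow an explicit do_adapt boolean. A minimal standalone sketch of the same bit-flag pattern, using stub names rather than the real ZFS types:

#include <stdio.h>

/* Mirrors arc_hdr_alloc_flags: one capability per bit. */
enum alloc_flags {
	ALLOC_RDATA = 0x1,	/* allocate the raw (encrypted) abd */
	DO_ADAPT = 0x2,		/* let the ARC adapt its target sizes */
};

/* Stand-in for arc_hdr_alloc_abd(): decode the bits once, up front. */
static void
alloc_abd_stub(int alloc_flags)
{
	int alloc_rdata = ((alloc_flags & ALLOC_RDATA) != 0);
	int do_adapt = ((alloc_flags & DO_ADAPT) != 0);

	printf("rdata=%d adapt=%d\n", alloc_rdata, do_adapt);
}

int
main(void)
{
	alloc_abd_stub(DO_ADAPT);		/* old B_FALSE call */
	alloc_abd_stub(DO_ADAPT | ALLOC_RDATA);	/* old B_TRUE call */
	alloc_abd_stub(ALLOC_RDATA);		/* newly expressible: no adapt */
	return (0);
}

The old boolean_t could only say rdata-or-not; every allocation implicitly adapted. Flags make the adapt step optional without threading a second boolean through every signature.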
@@ -1980,7 +1986,7 @@ arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
 	ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
 	ASSERT(HDR_ENCRYPTED(hdr));
 
-	arc_hdr_alloc_abd(hdr, B_FALSE);
+	arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
 
 	ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
 	    B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
@@ -2007,7 +2013,7 @@ arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
 		 * and then loan a buffer from it, rather than allocating a
 		 * linear buffer and wrapping it in an abd later.
 		 */
-		cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
+		cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, B_TRUE);
 		tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
 
 		ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
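The surviving comment in this hunk explains the design choice: allocate the (possibly scattered) abd first and loan a linear buffer out of it, rather than allocating a linear buffer and wrapping it in an abd afterwards. A rough standalone sketch of that borrow/return pattern, with hypothetical stubs in place of the real ABD API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in; real abds may hold scattered pages. */
typedef struct abd { char *data; size_t size; } abd_t;

static abd_t *
abd_alloc_stub(size_t size)
{
	abd_t *abd = malloc(sizeof (*abd));
	abd->data = calloc(1, size);
	abd->size = size;
	return (abd);
}

/* Loan out a linear view of the abd's storage. */
static void *
abd_borrow_buf_stub(abd_t *abd, size_t size)
{
	(void) size;	/* a scattered abd would allocate a bounce buffer */
	return (abd->data);
}

/* Give the loaned buffer back (real code may copy it into the abd). */
static void
abd_return_buf_stub(abd_t *abd, void *buf, size_t size)
{
	(void) abd; (void) buf; (void) size;
}

int
main(void)
{
	abd_t *cabd = abd_alloc_stub(512);
	char *tmp = abd_borrow_buf_stub(cabd, 512);

	/* Decompress/fill directly into the borrowed linear buffer. */
	memset(tmp, 0xab, 512);

	abd_return_buf_stub(cabd, tmp, 512);
	printf("first byte: 0x%02x\n", (unsigned char)cabd->data[0]);
	free(cabd->data);
	free(cabd);
	return (0);
}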
@@ -3312,9 +3318,11 @@ arc_buf_destroy_impl(arc_buf_t *buf)
 }
 
 static void
-arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
+arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
 {
 	uint64_t size;
+	boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
+	boolean_t do_adapt = ((alloc_flags & ARC_HDR_DO_ADAPT) != 0);
 
 	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
 	ASSERT(HDR_HAS_L1HDR(hdr));
@@ -3324,13 +3332,15 @@ arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
 	if (alloc_rdata) {
 		size = HDR_GET_PSIZE(hdr);
 		ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
-		hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr);
+		hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
+		    do_adapt);
 		ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
 		ARCSTAT_INCR(arcstat_raw_size, size);
 	} else {
 		size = arc_hdr_size(hdr);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-		hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr);
+		hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
+		    do_adapt);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 	}
 
@@ -3382,13 +3392,15 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
     arc_buf_contents_t type, boolean_t alloc_rdata)
 {
 	arc_buf_hdr_t *hdr;
+	int flags = ARC_HDR_DO_ADAPT;
 
 	VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
 	if (protected) {
 		hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
 	} else {
 		hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
 	}
+	flags |= alloc_rdata ? ARC_HDR_ALLOC_RDATA : 0;
 
 	ASSERT(HDR_EMPTY(hdr));
 	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
@@ -3412,7 +3424,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
 	 * the compressed or uncompressed data depending on the block
 	 * it references and compressed arc enablement.
 	 */
-	arc_hdr_alloc_abd(hdr, alloc_rdata);
+	arc_hdr_alloc_abd(hdr, flags);
 	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 
 	return (hdr);
@@ -5507,11 +5519,12 @@ arc_is_overflowing(void)
 }
 
 static abd_t *
-arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
+arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
+    boolean_t do_adapt)
 {
 	arc_buf_contents_t type = arc_buf_type(hdr);
 
-	arc_get_data_impl(hdr, size, tag);
+	arc_get_data_impl(hdr, size, tag, do_adapt);
 	if (type == ARC_BUFC_METADATA) {
 		return (abd_alloc(size, B_TRUE));
 	} else {
@@ -5525,7 +5538,7 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 {
 	arc_buf_contents_t type = arc_buf_type(hdr);
 
-	arc_get_data_impl(hdr, size, tag);
+	arc_get_data_impl(hdr, size, tag, B_TRUE);
 	if (type == ARC_BUFC_METADATA) {
 		return (zio_buf_alloc(size));
 	} else {
@@ -5541,12 +5554,14 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
  * limit, we'll only signal the reclaim thread and continue on.
  */
 static void
-arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
+arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
+    boolean_t do_adapt)
 {
 	arc_state_t *state = hdr->b_l1hdr.b_state;
 	arc_buf_contents_t type = arc_buf_type(hdr);
 
-	arc_adapt(size, state);
+	if (do_adapt)
+		arc_adapt(size, state);
 
 	/*
 	 * If arc_size is currently overflowing, and has grown past our
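arc_get_data_impl() is where the new boolean actually takes effect: adaptation becomes a caller opt-in instead of an unconditional side effect of every allocation. A toy standalone illustration (stub functions, not the real ARC code) of why that matters when one logical operation performs several allocations:

#include <stdio.h>

static int adapt_calls;

/* Stand-in for arc_adapt(): just count invocations. */
static void
adapt_stub(void)
{
	adapt_calls++;
}

/* Stand-in for arc_get_data_impl() with the new do_adapt gate. */
static void
get_data_impl_stub(int do_adapt)
{
	if (do_adapt)
		adapt_stub();
	/* ...overflow checks and state accounting would follow... */
}

int
main(void)
{
	/* Default callers keep the old behavior: adapt per allocation. */
	get_data_impl_stub(1);

	/*
	 * An arc_read()-style caller adapts once up front, then
	 * allocates without re-running the adapt logic each time.
	 */
	adapt_stub();
	get_data_impl_stub(0);
	get_data_impl_stub(0);

	printf("adapt calls: %d\n", adapt_calls);	/* 3, not 4 */
	return (0);
}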
@@ -6346,6 +6361,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 	boolean_t devw = B_FALSE;
 	uint64_t size;
 	abd_t *hdr_abd;
+	int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
 
 	/*
 	 * Gracefully handle a damaged logical block size as a
@@ -6424,8 +6440,9 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 		 * do this after we've called arc_access() to
 		 * avoid hitting an assert in remove_reference().
 		 */
+		arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
 		arc_access(hdr, hash_lock);
-		arc_hdr_alloc_abd(hdr, encrypted_read);
+		arc_hdr_alloc_abd(hdr, alloc_flags);
 	}
 
 	if (encrypted_read) {
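The two arc_read() hunks work as a pair: alloc_flags above deliberately omits ARC_HDR_DO_ADAPT, and this hunk instead calls arc_adapt() explicitly, once, before arc_access(); the subsequent arc_hdr_alloc_abd(hdr, alloc_flags) then allocates without re-running the adapt logic. The untouched comment ("do this after we've called arc_access()...") still refers to the allocation, which remains after arc_access(). The net effect on a read miss is that ARC target-size adaptation happens exactly once, up front, rather than from inside the allocation path.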
@@ -6869,7 +6886,7 @@ arc_release(arc_buf_t *buf, void *tag)
 		if (arc_can_share(hdr, lastbuf)) {
 			arc_share_buf(hdr, lastbuf);
 		} else {
-			arc_hdr_alloc_abd(hdr, B_FALSE);
+			arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
 			abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
 			    buf->b_data, psize);
 		}
@@ -7104,7 +7121,7 @@ arc_write_ready(zio_t *zio)
 	if (ARC_BUF_ENCRYPTED(buf)) {
 		ASSERT3U(psize, >, 0);
 		ASSERT(ARC_BUF_COMPRESSED(buf));
-		arc_hdr_alloc_abd(hdr, B_TRUE);
+		arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA);
 		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
 	} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
 		/*
@@ -7114,16 +7131,17 @@ arc_write_ready(zio_t *zio)
 		 */
 		if (BP_IS_ENCRYPTED(bp)) {
 			ASSERT3U(psize, >, 0);
-			arc_hdr_alloc_abd(hdr, B_TRUE);
+			arc_hdr_alloc_abd(hdr,
+			    ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA);
 			abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
 		} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
 		    !ARC_BUF_COMPRESSED(buf)) {
 			ASSERT3U(psize, >, 0);
-			arc_hdr_alloc_abd(hdr, B_FALSE);
+			arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
 			abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
 		} else {
 			ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
-			arc_hdr_alloc_abd(hdr, B_FALSE);
+			arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
 			abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
 			    arc_buf_size(buf));
 		}
@@ -8418,7 +8436,8 @@ l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
 	 * until arc_read_done().
 	 */
 	if (BP_IS_ENCRYPTED(bp)) {
-		abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
+		abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
+		    B_TRUE);
 
 		zio_crypt_decode_params_bp(bp, salt, iv);
 		zio_crypt_decode_mac_bp(bp, mac);
@@ -8454,7 +8473,8 @@ l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
 	 */
 	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
 	    !HDR_COMPRESSION_ENABLED(hdr)) {
-		abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
+		abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
+		    B_TRUE);
 		void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
 
 		ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
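Taken together, the call-site conversions outside arc_read() are behavior-preserving: every other arc_hdr_alloc_abd() caller passes ARC_HDR_DO_ADAPT (adding ARC_HDR_ALLOC_RDATA where it previously passed B_TRUE), and every converted arc_get_data_abd() caller passes do_adapt = B_TRUE, matching the unconditional arc_adapt() call that arc_get_data_impl() used to make. Only the arc_read() miss path opts out, having adapted explicitly beforehand.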