@@ -25,16 +25,6 @@
  */

 #include <atomic.h>
-#include <assert.h>
-#include <pthread.h>
-
-/*
- * All operations are implemented by serializing them through a global
- * pthread mutex. This provides a correct generic implementation.
- * However all supported architectures are encouraged to provide a
- * native implementation is assembly for performance reasons.
- */
-pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;

 /*
  * These are the void returning variants
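
The commit drops the global-mutex fallback above and maps each primitive
directly onto the GCC/Clang __atomic builtins, which the compiler lowers to
native lock-free read-modify-write instructions where the target supports
them. A minimal sketch of the idea, outside the commit itself (the counter
and function name here are illustrative only):

#include <stdint.h>

static uint64_t counter;

/*
 * Equivalent of the old lock/increment/unlock sequence, now a single
 * builtin that compiles to one atomic RMW with full-barrier semantics.
 */
void
example_inc(void)
{
	(void) __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);
}
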
@@ -43,9 +33,7 @@ pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
 #define	ATOMIC_INC(name, type) \
	void atomic_inc_##name(volatile type *target) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		(*target)++; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_INC(8, uint8_t)
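
For reference, a sketch of what ATOMIC_INC(64, uint64_t) expands to after
this change (preprocessor output shown for illustration, not part of the
diff):

void atomic_inc_64(volatile uint64_t *target)
{
	/* Atomic increment; the return value of the builtin is unused. */
	__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST);
}
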
@@ -61,9 +49,7 @@ ATOMIC_INC(64, uint64_t)
 #define	ATOMIC_DEC(name, type) \
	void atomic_dec_##name(volatile type *target) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		(*target)--; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_DEC(8, uint8_t)
@@ -79,9 +65,7 @@ ATOMIC_DEC(64, uint64_t)
 #define	ATOMIC_ADD(name, type1, type2) \
	void atomic_add_##name(volatile type1 *target, type2 bits) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		*target += bits; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_ADD(8, uint8_t, int8_t)
@@ -96,18 +80,14 @@ ATOMIC_ADD(64, uint64_t, int64_t)
 void
 atomic_add_ptr(volatile void *target, ssize_t bits)
 {
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	*(caddr_t *)target += bits;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+	__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }


 #define	ATOMIC_SUB(name, type1, type2) \
	void atomic_sub_##name(volatile type1 *target, type2 bits) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		*target -= bits; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_SUB(8, uint8_t, int8_t)
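
The pointer variants keep their byte-offset semantics: the old code stepped
a caddr_t (char *) by bits, and since the stored type here is void *, for
which GNU C pointer arithmetic is byte-granular, the builtin form applies
the same byte offset. A usage sketch assuming this file's declarations
(the buffer and function name are illustrative):

static char buf[16];
static void *cursor = buf;

void
example_advance(void)
{
	/* Atomically advance the stored pointer by 4 bytes. */
	atomic_add_ptr(&cursor, 4);	/* cursor == buf + 4 afterwards */
}
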
@@ -122,18 +102,14 @@ ATOMIC_SUB(64, uint64_t, int64_t)
 void
 atomic_sub_ptr(volatile void *target, ssize_t bits)
 {
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	*(caddr_t *)target -= bits;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+	__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }


 #define	ATOMIC_OR(name, type) \
	void atomic_or_##name(volatile type *target, type bits) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		*target |= bits; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_OR(8, uint8_t)
@@ -149,9 +125,7 @@ ATOMIC_OR(64, uint64_t)
 #define	ATOMIC_AND(name, type) \
	void atomic_and_##name(volatile type *target, type bits) \
	{ \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		*target &= bits; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+		__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

 ATOMIC_AND(8, uint8_t)
@@ -171,11 +145,7 @@ ATOMIC_AND(64, uint64_t)
 #define	ATOMIC_INC_NV(name, type) \
	type atomic_inc_##name##_nv(volatile type *target) \
	{ \
-		type rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (++(*target)); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_INC_NV(8, uint8_t)
@@ -191,11 +161,7 @@ ATOMIC_INC_NV(64, uint64_t)
 #define	ATOMIC_DEC_NV(name, type) \
	type atomic_dec_##name##_nv(volatile type *target) \
	{ \
-		type rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (--(*target)); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_DEC_NV(8, uint8_t)
@@ -209,13 +175,9 @@ ATOMIC_DEC_NV(64, uint64_t)


 #define	ATOMIC_ADD_NV(name, type1, type2) \
-	type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits)\
+	type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
-		type1 rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (*target += bits); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_ADD_NV(8, uint8_t, int8_t)
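
The _nv suffix means "new value": these functions return the value after
the operation, which is exactly the op-then-fetch behavior of
__atomic_add_fetch (the void-returning variants above could equally have
used __atomic_fetch_add, since their result is discarded). As a sketch,
ATOMIC_ADD_NV(64, uint64_t, int64_t) expands to:

uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t bits)
{
	/* Returns the updated value, per the Solaris _nv contract. */
	return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST));
}
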
@@ -230,24 +192,14 @@ ATOMIC_ADD_NV(64, uint64_t, int64_t)
 void *
 atomic_add_ptr_nv(volatile void *target, ssize_t bits)
 {
-	void *ptr;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	ptr = (*(caddr_t *)target += bits);
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-
-	return (ptr);
+	return __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }


 #define	ATOMIC_SUB_NV(name, type1, type2) \
-	type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\
+	type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
-		type1 rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (*target -= bits); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_SUB_NV(8, uint8_t, int8_t)
@@ -262,24 +214,14 @@ ATOMIC_SUB_NV(64, uint64_t, int64_t)
 void *
 atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
 {
-	void *ptr;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	ptr = (*(caddr_t *)target -= bits);
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-
-	return (ptr);
+	return __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }


 #define	ATOMIC_OR_NV(name, type) \
	type atomic_or_##name##_nv(volatile type *target, type bits) \
	{ \
-		type rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (*target |= bits); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_OR_NV(8, uint8_t)
@@ -295,11 +237,7 @@ ATOMIC_OR_NV(64, uint64_t)
 #define	ATOMIC_AND_NV(name, type) \
	type atomic_and_##name##_nv(volatile type *target, type bits) \
	{ \
-		type rc; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		rc = (*target &= bits); \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (rc); \
+		return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_AND_NV(8, uint8_t)
@@ -313,19 +251,15 @@ ATOMIC_AND_NV(64, uint64_t)
 /*
- * If *arg1 == arg2, set *arg1 = arg3; return old value
+ * If *tgt == exp, set *tgt = des; return old value
  */

 #define	ATOMIC_CAS(name, type) \
-	type atomic_cas_##name(volatile type *target, type arg1, type arg2) \
+	type atomic_cas_##name(volatile type *target, type exp, type des) \
	{ \
-		type old; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		old = *target; \
-		if (old == arg1) \
-			*target = arg2; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (old); \
+		__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
+		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
+		return (exp); \
	}

 ATOMIC_CAS(8, uint8_t)
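
__atomic_compare_exchange_n reports success as a boolean and, on failure,
writes the value it actually observed back into exp; on success exp still
holds the expected (old) value. Returning exp therefore preserves the
Solaris contract of returning the old value in both cases, and B_FALSE
selects the strong (non-spuriously-failing) form. A typical retry loop
built on this contract, as an illustrative sketch (the function name is
hypothetical):

uint64_t
example_fetch_max(volatile uint64_t *max, uint64_t value)
{
	uint64_t cur = *max;

	while (value > cur) {
		uint64_t old = atomic_cas_64(max, cur, value);
		if (old == cur)
			break;		/* swap happened */
		cur = old;		/* raced; retry with observed value */
	}
	return (cur);
}
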
@@ -338,17 +272,12 @@ ATOMIC_CAS(ulong, ulong_t)
 ATOMIC_CAS(64, uint64_t)

 void *
-atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
+atomic_cas_ptr(volatile void *target, void *exp, void *des)
 {
-	void *old;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	old = *(void **)target;
-	if (old == arg1)
-		*(void **)target = arg2;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);

-	return (old);
+	__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
+	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+	return (exp);
 }

@@ -359,12 +288,7 @@ atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
 #define	ATOMIC_SWAP(name, type) \
	type atomic_swap_##name(volatile type *target, type bits) \
	{ \
-		type old; \
-		VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
-		old = *target; \
-		*target = bits; \
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
-		return (old); \
+		return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
	}

 ATOMIC_SWAP(8, uint8_t)
@@ -380,71 +304,46 @@ ATOMIC_SWAP(64, uint64_t)
 void *
 atomic_swap_ptr(volatile void *target, void *bits)
 {
-	void *old;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	old = *(void **)target;
-	*(void **)target = bits;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-
-	return (old);
+	return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
 }


 int
 atomic_set_long_excl(volatile ulong_t *target, uint_t value)
 {
-	ulong_t bit;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	bit = (1UL << value);
-	if ((*target & bit) != 0) {
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-		return (-1);
-	}
-	*target |= bit;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-
-	return (0);
+	ulong_t bit = 1UL << value;
+	ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
+	return ((old & bit) ? -1 : 0);
 }

 int
 atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
 {
-	ulong_t bit;
-
-	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
-	bit = (1UL << value);
-	if ((*target & bit) == 0) {
-		VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-		return (-1);
-	}
-	*target &= ~bit;
-	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
-
-	return (0);
+	ulong_t bit = 1UL << value;
+	ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
+	return ((old & bit) ? 0 : -1);
 }
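
The rewritten bit operations fold the old check-then-modify sequence (which
needed the mutex to be race-free) into a single atomic fetch-or/fetch-and;
the returned pre-image tells the caller whether this call actually changed
the bit. That makes atomic_set_long_excl() a per-bit test-and-set, usable
as a tiny lock, sketched below with illustrative names:

static volatile ulong_t lock_bits;

void
example_critical(uint_t slot)
{
	while (atomic_set_long_excl(&lock_bits, slot) != 0)
		;	/* bit already owned; spin */
	/* ... critical section for this slot ... */
	(void) atomic_clear_long_excl(&lock_bits, slot);
}
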
 void
 membar_enter(void)
 {
-	/* XXX - Implement me */
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 }

 void
 membar_exit(void)
 {
-	/* XXX - Implement me */
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 }

 void
 membar_producer(void)
 {
-	/* XXX - Implement me */
+	__atomic_thread_fence(__ATOMIC_RELEASE);
 }

 void
 membar_consumer(void)
 {
-	/* XXX - Implement me */
+	__atomic_thread_fence(__ATOMIC_ACQUIRE);
 }
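
The previously unimplemented barriers now map onto compiler fences:
membar_enter()/membar_exit() get full fences, while membar_producer()
orders prior stores (release) and membar_consumer() orders subsequent
loads (acquire). The classic pairing they exist for, as a simplified
sketch (names illustrative; a strictly conforming C11 program would make
the flag an atomic type rather than volatile):

static int data;
static volatile int ready;

void
producer(void)
{
	data = 42;
	membar_producer();	/* publish data before the flag */
	ready = 1;
}

int
consumer(void)
{
	while (ready == 0)
		;		/* wait for the flag */
	membar_consumer();	/* observe the flag before reading data */
	return (data);
}
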