Commit a921f07
Merge patch series "riscv: misaligned: fix misaligned accesses handling in put/get_user()"
Clément Léger <[email protected]> says:

While debugging a few problems with the misaligned access kselftest, Alexandre discovered a crash with the current code: some misaligned accesses were being done by the kernel itself through put_user(), which resulted in a trap and a kernel crash. The path was the following:

  user -> kernel -> access to user memory -> misaligned trap -> kernel misaligned handling -> memcpy -> crash due to a failed page fault while in an interrupt-disabled section

The conclusion of the last discussion about kernel misaligned handling and interrupt re-enabling was to not re-enable interrupts when handling a misaligned access done by the kernel. Since the best solution is to not do any misaligned accesses to userspace memory at all, we considered a few options:

- Remove any call to put/get_user() that could potentially do misaligned accesses
- Do not do any misaligned accesses in put/get_user() itself

We chose the second solution, as there are too many call sites of put/get_user() that could potentially do misaligned accesses. We tried two approaches for that: either split the access into two aligned accesses (doing a read-modify-write for put_user()), or call copy_from/to_user(), which never does misaligned accesses. The latter was simpler to implement (its performance is probably lower than split aligned accesses, but still far better than misaligned access emulation) and supports what we wanted.

These commits are based on top of Alex's dev/alex/get_user_misaligned_v1 branch.

[Palmer: No idea what that branch is, so I'm basing it on the uaccess optimizations patch series, which is the last thing to touch these.]

* b4-shazam-merge:
  riscv: uaccess: do not do misaligned accesses in get/put_user()
  riscv: process: use unsigned int instead of unsigned long for put_user()
  riscv: make unsafe user copy routines use existing assembly routines

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
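As a rough sketch of the two approaches weighed above, here is a minimal userspace model of a misaligned 4-byte store. The helper names are illustrative, not kernel API, and little-endian byte order (as on RISC-V) is assumed; the series adopts the byte-wise variant, since the copy_to_user() assembly never issues a wide misaligned access:

	#include <stdint.h>
	#include <string.h>

	/* Approach 1 (considered): split into aligned accesses, read-modify-write. */
	static void put32_split_rmw(uint32_t val, uint8_t *p)
	{
		uintptr_t off = (uintptr_t)p & 3;
		uint32_t *lo = (uint32_t *)(p - off);	/* aligned word containing p */

		if (off == 0) {
			*lo = val;			/* already aligned: plain store */
			return;
		}
		/* Load the two aligned words the store straddles, merge, write back
		 * (little-endian layout assumed). */
		uint64_t merged = ((uint64_t)lo[1] << 32) | lo[0];
		uint64_t mask = 0xffffffffull << (off * 8);

		merged = (merged & ~mask) | ((uint64_t)val << (off * 8));
		lo[0] = (uint32_t)merged;
		lo[1] = (uint32_t)(merged >> 32);
	}

	/* Approach 2 (chosen): copy byte granules; no access is ever misaligned. */
	static void put32_bytewise(uint32_t val, uint8_t *p)
	{
		memcpy(p, &val, sizeof(val));
	}

	int main(void)
	{
		uint32_t backing[2] = { 0, 0 };		/* guarantees 4-byte alignment */
		uint8_t *buf = (uint8_t *)backing;

		put32_split_rmw(0x11223344u, buf + 1);	/* misaligned target */
		put32_bytewise(0x55667788u, buf + 1);
		return 0;
	}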
2 parents: 265d6ab + ca1a66c

File tree: 6 files changed, +78 −50 lines

arch/riscv/include/asm/asm-prototypes.h

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@ long long __ashlti3(long long a, int b);
 #ifdef CONFIG_RISCV_ISA_V
 
 #ifdef CONFIG_MMU
-asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n, bool enable_sum);
 #endif /* CONFIG_MMU */
 
 void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,

arch/riscv/include/asm/uaccess.h

Lines changed: 22 additions & 26 deletions

@@ -169,8 +169,19 @@ do { \
 
 #endif /* CONFIG_64BIT */
 
+unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
+		const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
+		const void __user *from, unsigned long n);
+
 #define __get_user_nocheck(x, __gu_ptr, label)			\
 do {								\
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+		if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
+			goto label;				\
+		break;						\
+	}							\
 	switch (sizeof(*__gu_ptr)) {				\
 	case 1:							\
 		__get_user_asm("lb", (x), __gu_ptr, label);	\
@@ -297,6 +308,13 @@ do { \
 
 #define __put_user_nocheck(x, __gu_ptr, label)			\
 do {								\
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+		__inttype(x) val = (__inttype(x))x;		\
+		if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+			goto label;				\
+		break;						\
+	}							\
 	switch (sizeof(*__gu_ptr)) {				\
 	case 1:							\
 		__put_user_asm("sb", (x), __gu_ptr, label);	\
@@ -450,35 +468,13 @@ static inline void user_access_restore(unsigned long enabled) { }
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 } while (0)
 
-#define unsafe_copy_loop(dst, src, len, type, op, label)	\
-	while (len >= sizeof(type)) {				\
-		op(*(type *)(src), (type __user *)(dst), label); \
-		dst += sizeof(type);				\
-		src += sizeof(type);				\
-		len -= sizeof(type);				\
-	}
-
 #define unsafe_copy_to_user(_dst, _src, _len, label)		\
-do {								\
-	char __user *__ucu_dst = (_dst);			\
-	const char *__ucu_src = (_src);				\
-	size_t __ucu_len = (_len);				\
-	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, unsafe_put_user, label); \
-	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, unsafe_put_user, label); \
-	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, unsafe_put_user, label); \
-	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, unsafe_put_user, label); \
-} while (0)
+	if (__asm_copy_to_user_sum_enabled(_dst, _src, _len))	\
+		goto label;
 
 #define unsafe_copy_from_user(_dst, _src, _len, label)		\
-do {								\
-	char *__ucu_dst = (_dst);				\
-	const char __user *__ucu_src = (_src);			\
-	size_t __ucu_len = (_len);				\
-	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u64, unsafe_get_user, label); \
-	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u32, unsafe_get_user, label); \
-	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u16, unsafe_get_user, label); \
-	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u8, unsafe_get_user, label); \
-} while (0)
+	if (__asm_copy_from_user_sum_enabled(_dst, _src, _len))	\
+		goto label;
 
 #else /* CONFIG_MMU */
 #include <asm-generic/uaccess.h>
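A minimal userspace sketch of the dispatch the new __get_user_nocheck() performs; only IS_ALIGNED mirrors the kernel macro, the other names are stand-ins. On configs with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the IS_ENABLED() test makes the whole fallback branch compile away:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/* Stand-in for __asm_copy_from_user_sum_enabled(): copies byte granules,
	 * so it can never take a misaligned-access trap. Returns 0 on success. */
	static unsigned long copy_from_user_bytewise(void *to, const void *from,
						     unsigned long n)
	{
		memcpy(to, from, n);
		return 0;
	}

	/* Models the new macro for a 4-byte read: misaligned pointers take the
	 * byte-wise slow path, aligned ones the single-load fast path. */
	static int model_get_user_u32(uint32_t *x, const void *uptr)
	{
		if (!IS_ALIGNED((uintptr_t)uptr, sizeof(*x)))
			return copy_from_user_bytewise(x, uptr, sizeof(*x)) ? -14 : 0;
		*x = *(const uint32_t *)uptr;	/* fast path: one aligned lw */
		return 0;
	}

	int main(void)
	{
		uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
		uint32_t v;

		model_get_user_u32(&v, buf + 1);	/* misaligned: slow path */
		printf("0x%08x\n", v);			/* 0x05040302 on little-endian */
		return 0;
	}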

arch/riscv/kernel/process.c

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@ int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
 	if (!unaligned_ctl_available())
 		return -EINVAL;
 
-	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
+	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
 }
 
 void __show_regs(struct pt_regs *regs)
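The cast matters because put_user() derives its store width from the pointer type: prctl(2) documents PR_GET_UNALIGN's arg2 as an unsigned int *, so storing through an unsigned long __user * would write 8 bytes into userspace's 4-byte slot on rv64. A trivial illustration of the width rule:

	#include <stdio.h>

	int main(void)
	{
		unsigned int *p32 = NULL;	/* what userspace actually passes */
		unsigned long *p64 = NULL;	/* what the old cast pretended */

		/* put_user(x, ptr) stores sizeof(*ptr) bytes. */
		printf("unsigned int store:  %zu bytes\n", sizeof(*p32));	/* 4 */
		printf("unsigned long store: %zu bytes\n", sizeof(*p64));	/* 8 on rv64 */
		return 0;
	}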

arch/riscv/lib/riscv_v_helpers.c

Lines changed: 8 additions & 3 deletions

@@ -16,8 +16,11 @@
 #ifdef CONFIG_MMU
 size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
 int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int __asm_vector_usercopy_sum_enabled(void *dst, void *src, size_t n);
 int fallback_scalar_usercopy(void *dst, void *src, size_t n);
-asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+int fallback_scalar_usercopy_sum_enabled(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n,
+				     bool enable_sum)
 {
 	size_t remain, copied;
 
@@ -26,7 +29,8 @@ asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
 		goto fallback;
 
 	kernel_vector_begin();
-	remain = __asm_vector_usercopy(dst, src, n);
+	remain = enable_sum ? __asm_vector_usercopy(dst, src, n) :
+			      __asm_vector_usercopy_sum_enabled(dst, src, n);
 	kernel_vector_end();
 
 	if (remain) {
@@ -40,6 +44,7 @@ asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
 	return remain;
 
 fallback:
-	return fallback_scalar_usercopy(dst, src, n);
+	return enable_sum ? fallback_scalar_usercopy(dst, src, n) :
+			    fallback_scalar_usercopy_sum_enabled(dst, src, n);
 }
 #endif

arch/riscv/lib/uaccess.S

Lines changed: 34 additions & 16 deletions

@@ -17,14 +17,43 @@ SYM_FUNC_START(__asm_copy_to_user)
 	ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_ZVE32X, CONFIG_RISCV_ISA_V)
 	REG_L	t0, riscv_v_usercopy_threshold
 	bltu	a2, t0, fallback_scalar_usercopy
-	tail	enter_vector_usercopy
+	li	a3, 1
+	tail	enter_vector_usercopy
 #endif
-SYM_FUNC_START(fallback_scalar_usercopy)
+SYM_FUNC_END(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
 
+SYM_FUNC_START(fallback_scalar_usercopy)
 	/* Enable access to user memory */
-	li	t6, SR_SUM
-	csrs	CSR_STATUS, t6
+	li	t6, SR_SUM
+	csrs	CSR_STATUS, t6
+	mv	t6, ra
 
+	call	fallback_scalar_usercopy_sum_enabled
+
+	/* Disable access to user memory */
+	mv	ra, t6
+	li	t6, SR_SUM
+	csrc	CSR_STATUS, t6
+	ret
+SYM_FUNC_END(fallback_scalar_usercopy)
+
+SYM_FUNC_START(__asm_copy_to_user_sum_enabled)
+#ifdef CONFIG_RISCV_ISA_V
+	ALTERNATIVE("j fallback_scalar_usercopy_sum_enabled", "nop", 0, RISCV_ISA_EXT_ZVE32X, CONFIG_RISCV_ISA_V)
+	REG_L	t0, riscv_v_usercopy_threshold
+	bltu	a2, t0, fallback_scalar_usercopy_sum_enabled
+	li	a3, 0
+	tail	enter_vector_usercopy
+#endif
+SYM_FUNC_END(__asm_copy_to_user_sum_enabled)
+SYM_FUNC_ALIAS(__asm_copy_from_user_sum_enabled, __asm_copy_to_user_sum_enabled)
+EXPORT_SYMBOL(__asm_copy_from_user_sum_enabled)
+EXPORT_SYMBOL(__asm_copy_to_user_sum_enabled)
+
+SYM_FUNC_START(fallback_scalar_usercopy_sum_enabled)
 	/*
 	 * Save the terminal address which will be used to compute the number
 	 * of bytes copied in case of a fixup exception.
@@ -178,23 +207,12 @@ SYM_FUNC_START(fallback_scalar_usercopy)
 	bltu	a0, t0, 4b	/* t0 - end of dst */
 
 .Lout_copy_user:
-	/* Disable access to user memory */
-	csrc	CSR_STATUS, t6
 	li	a0, 0
 	ret
-
-	/* Exception fixup code */
 10:
-	/* Disable access to user memory */
-	csrc	CSR_STATUS, t6
 	sub	a0, t5, a0
 	ret
-SYM_FUNC_END(__asm_copy_to_user)
-SYM_FUNC_END(fallback_scalar_usercopy)
-EXPORT_SYMBOL(__asm_copy_to_user)
-SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
-EXPORT_SYMBOL(__asm_copy_from_user)
-
+SYM_FUNC_END(fallback_scalar_usercopy_sum_enabled)
 
 SYM_FUNC_START(__clear_user)
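The reshuffle above follows one pattern: the exported entry point toggles SR_SUM around a call to a *_sum_enabled body, so the copy loop and its exception fixups no longer need to clear SUM on every exit path. A rough C rendering of that wrapper shape, with hypothetical enable_sum()/disable_sum() stubs standing in for the csrs/csrc CSR_STATUS, SR_SUM pairs and a byte loop standing in for the real copy body:

	#include <stddef.h>

	static void enable_sum(void)  { /* li t6, SR_SUM; csrs CSR_STATUS, t6 */ }
	static void disable_sum(void) { /* li t6, SR_SUM; csrc CSR_STATUS, t6 */ }

	/* Body that assumes SUM is already enabled; the real one is the scalar
	 * copy loop with its fixup labels. Returns bytes left uncopied. */
	static unsigned long fallback_scalar_usercopy_sum_enabled(void *dst,
								  void *src,
								  size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		while (n--)
			*d++ = *s++;
		return 0;
	}

	/* Wrapper shape of fallback_scalar_usercopy after this change. */
	static unsigned long fallback_scalar_usercopy(void *dst, void *src,
						      size_t n)
	{
		unsigned long remain;

		enable_sum();
		remain = fallback_scalar_usercopy_sum_enabled(dst, src, n);
		disable_sum();
		return remain;
	}

	int main(void)
	{
		char src[4] = "abc", dst[4];

		return (int)fallback_scalar_usercopy(dst, src, sizeof(src));
	}

The same shape applies in uaccess_vector.S below, where __asm_vector_usercopy now wraps __asm_vector_usercopy_sum_enabled.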
arch/riscv/lib/uaccess_vector.S

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,18 @@ SYM_FUNC_START(__asm_vector_usercopy)
2424
/* Enable access to user memory */
2525
li t6, SR_SUM
2626
csrs CSR_STATUS, t6
27+
mv t6, ra
2728

29+
call __asm_vector_usercopy_sum_enabled
30+
31+
/* Disable access to user memory */
32+
mv ra, t6
33+
li t6, SR_SUM
34+
csrc CSR_STATUS, t6
35+
ret
36+
SYM_FUNC_END(__asm_vector_usercopy)
37+
38+
SYM_FUNC_START(__asm_vector_usercopy_sum_enabled)
2839
loop:
2940
vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
3041
fixup vle8.v vData, (pSrc), 10f
@@ -36,8 +47,6 @@ loop:
3647

3748
/* Exception fixup for vector load is shared with normal exit */
3849
10:
39-
/* Disable access to user memory */
40-
csrc CSR_STATUS, t6
4150
mv a0, iNum
4251
ret
4352

@@ -49,4 +58,4 @@ loop:
4958
csrr t2, CSR_VSTART
5059
sub iNum, iNum, t2
5160
j 10b
52-
SYM_FUNC_END(__asm_vector_usercopy)
61+
SYM_FUNC_END(__asm_vector_usercopy_sum_enabled)
