Commit 97efd28

Merge tag 'x86-cleanups-2023-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 cleanups from Ingo Molnar:
 "The following commit deserves special mention:

    22dc02f Revert "sched/fair: Move unused stub functions to header"

  This is in x86/cleanups, because the revert is a re-application of a
  number of cleanups that got removed inadvertently"

[ This also effectively undoes the amd_check_microcode() microcode
  declaration change I had done in my microcode loader merge in commit
  42a7f6e ("Merge tag 'x86_microcode_for_v6.6_rc1' [...]").

  I picked the declaration change by Arnd from this branch instead, which
  put it in <asm/processor.h> instead of <asm/microcode.h> like I had done
  in my merge resolution  - Linus ]

* tag 'x86-cleanups-2023-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/platform/uv: Refactor code using deprecated strncpy() interface to use strscpy()
  x86/hpet: Refactor code using deprecated strncpy() interface to use strscpy()
  x86/platform/uv: Refactor code using deprecated strcpy()/strncpy() interfaces to use strscpy()
  x86/qspinlock-paravirt: Fix missing-prototype warning
  x86/paravirt: Silence unused native_pv_lock_init() function warning
  x86/alternative: Add a __alt_reloc_selftest() prototype
  x86/purgatory: Include header for warn() declaration
  x86/asm: Avoid unneeded __div64_32 function definition
  Revert "sched/fair: Move unused stub functions to header"
  x86/apic: Hide unused safe_smp_processor_id() on 32-bit UP
  x86/cpu: Fix amd_check_microcode() declaration
2 parents: 3ca9a83 + 212f07a

18 files changed: 48 additions & 40 deletions

arch/x86/boot/compressed/error.c

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 #include "misc.h"
 #include "error.h"
 
-void warn(char *m)
+void warn(const char *m)
 {
 	error_putstr("\n\n");
 	error_putstr(m);

arch/x86/boot/compressed/error.h

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 #include <linux/compiler.h>
 
-void warn(char *m);
+void warn(const char *m);
 void error(char *m) __noreturn;
 void panic(const char *fmt, ...) __noreturn __cold;

arch/x86/include/asm/div64.h

Lines changed: 6 additions & 0 deletions
@@ -71,6 +71,12 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #define mul_u32_u32 mul_u32_u32
 
+/*
+ * __div64_32() is never called on x86, so prevent the
+ * generic definition from getting built.
+ */
+#define __div64_32
+
 #else
 # include <asm-generic/div64.h>
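For context, a minimal standalone sketch (not the kernel's actual div64 code) of why an empty #define is enough: the generic fallback is typically wrapped in an #ifndef of the same name, so defining the name as an empty macro makes that guard skip the generic body entirely.

#include <stdint.h>
#include <stdio.h>

#define __div64_32	/* the arch says: the out-of-line helper is never needed */

#ifndef __div64_32	/* generic fallback is compiled only when not masked */
static uint32_t __div64_32(uint64_t *dividend, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*dividend % divisor);

	*dividend /= divisor;	/* generic 64-by-32 division fallback */
	return rem;
}
#endif

int main(void)
{
#ifdef __div64_32
	puts("generic __div64_32 fallback masked out");
#else
	puts("generic __div64_32 fallback built in");
#endif
	return 0;
}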

arch/x86/include/asm/microcode.h

Lines changed: 0 additions & 6 deletions
@@ -75,10 +75,4 @@ void show_ucode_info_early(void);
 static inline void show_ucode_info_early(void) { }
 #endif /* !CONFIG_CPU_SUP_INTEL */
 
-#ifdef CONFIG_CPU_SUP_AMD
-void amd_check_microcode(void);
-#else /* CONFIG_CPU_SUP_AMD */
-static inline void amd_check_microcode(void) {}
-#endif
-
 #endif /* _ASM_X86_MICROCODE_H */

arch/x86/include/asm/paravirt.h

Lines changed: 7 additions & 0 deletions
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
 	".popsection")
 
 extern void default_banner(void);
+void native_pv_lock_init(void) __init;
 
 #else /* __ASSEMBLY__ */
 
@@ -778,6 +779,12 @@ extern void default_banner(void);
 #endif /* __ASSEMBLY__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+
+#ifndef __ASSEMBLY__
+static inline void native_pv_lock_init(void)
+{
+}
+#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLY__

arch/x86/include/asm/processor.h

Lines changed: 2 additions & 0 deletions
@@ -678,11 +678,13 @@ extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
 extern bool cpu_has_ibpb_brtype_microcode(void);
 extern void amd_clear_divider(void);
+extern void amd_check_microcode(void);
 #else
 static inline u32 amd_get_nodes_per_socket(void) { return 0; }
 static inline u32 amd_get_highest_perf(void) { return 0; }
 static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
 static inline void amd_clear_divider(void) { }
+static inline void amd_check_microcode(void) { }
 #endif
 
 extern unsigned long arch_align_stack(unsigned long sp);
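The declaration pair moved here follows the usual Kconfig stub idiom: a real prototype when CONFIG_CPU_SUP_AMD is enabled, an empty static inline otherwise, so callers can invoke amd_check_microcode() without their own #ifdef. A minimal sketch of the idiom with made-up names (the real version would live in another translation unit when the option is on):

#include <stdio.h>

#define MY_CONFIG_FEATURE 0	/* 0 here, so the inline stub below is used */

#if MY_CONFIG_FEATURE
void feature_check(void);			/* real implementation elsewhere */
#else
static inline void feature_check(void) { }	/* no-op stub */
#endif

int main(void)
{
	feature_check();	/* call sites never need their own #ifdef */
	puts("done");
	return 0;
}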

arch/x86/include/asm/qspinlock.h

Lines changed: 1 addition & 6 deletions
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
  */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
-void native_pv_lock_init(void) __init;
-
 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
  * virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 
 	return true;
 }
-#else
-static inline void native_pv_lock_init(void)
-{
-}
+
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>

arch/x86/include/asm/qspinlock_paravirt.h

Lines changed: 2 additions & 0 deletions
@@ -4,6 +4,8 @@
 
 #include <asm/ibt.h>
 
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+
 /*
  * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
  * registers. For i386, however, only 1 32-bit register needs to be saved

arch/x86/kernel/alternative.c

Lines changed: 1 addition & 0 deletions
@@ -1527,6 +1527,7 @@ static noinline void __init int3_selftest(void)
 
 static __initdata int __alt_reloc_selftest_addr;
 
+extern void __init __alt_reloc_selftest(void *arg);
 __visible noinline void __init __alt_reloc_selftest(void *arg)
 {
 	WARN_ON(arg != &__alt_reloc_selftest_addr);
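This change, like the qspinlock-paravirt one above, addresses -Wmissing-prototypes: a non-static function defined without a previously visible declaration triggers the warning. A tiny illustration with hypothetical function names, assuming GCC or Clang:

/* build with: cc -Wmissing-prototypes -c proto-demo.c */

int helper_without_decl(int x)		/* warns: no previous prototype */
{
	return x + 1;
}

int helper_with_decl(int x);		/* prototype visible before the definition */

int helper_with_decl(int x)		/* no warning */
{
	return x + 2;
}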

arch/x86/kernel/apic/ipi.c

Lines changed: 2 additions & 0 deletions
@@ -301,6 +301,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_SMP
 /* must come after the send_IPI functions above for inlining */
 static int convert_apicid_to_cpu(int apic_id)
 {
@@ -329,3 +330,4 @@ int safe_smp_processor_id(void)
 	return cpuid >= 0 ? cpuid : 0;
 }
 #endif
+#endif

arch/x86/kernel/apic/x2apic_uv_x.c

Lines changed: 2 additions & 3 deletions
@@ -294,8 +294,7 @@ static void __init early_get_apic_socketid_shift(void)
 
 static void __init uv_stringify(int len, char *to, char *from)
 {
-	/* Relies on 'to' being NULL chars so result will be NULL terminated */
-	strncpy(to, from, len-1);
+	strscpy(to, from, len);
 
 	/* Trim trailing spaces */
 	(void)strim(to);
@@ -1013,7 +1012,7 @@ static void __init calc_mmioh_map(enum mmioh_arch index,
 
 	/* One (UV2) mapping */
 	if (index == UV2_MMIOH) {
-		strncpy(id, "MMIOH", sizeof(id));
+		strscpy(id, "MMIOH", sizeof(id));
 		max_io = max_pnode;
 		mapped = 0;
 		goto map_exit;
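The strncpy()-to-strscpy() conversions in this series all hinge on the same difference: strncpy() neither guarantees NUL termination nor reports truncation, while strscpy() always terminates and returns the copied length or an error on truncation. A simplified userspace stand-in (strscpy_like() is illustrative, not the kernel implementation):

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's strscpy(): bounded copy that is
 * always NUL-terminated and signals truncation (the kernel returns -E2BIG) */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len = 0;

	if (size == 0)
		return -1;
	while (len < size - 1 && src[len])	/* copy at most size-1 bytes */
		len++;
	memcpy(dst, src, len);
	dst[len] = '\0';			/* always NUL-terminate */
	return src[len] ? -1 : (long)len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "MMIOH", sizeof(a));		/* fills a[0..3], no terminator */
	strscpy_like(b, "MMIOH", sizeof(b));	/* b == "MMI", terminated */
	printf("truncated copy: \"%s\"\n", b);
	(void)a;				/* printing 'a' would be undefined */
	return 0;
}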

arch/x86/kernel/hpet.c

Lines changed: 1 addition & 1 deletion
@@ -421,7 +421,7 @@ static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
 	 * the IO_APIC has been initialized.
 	 */
 	hc->cpu = boot_cpu_data.cpu_index;
-	strncpy(hc->name, "hpet", sizeof(hc->name));
+	strscpy(hc->name, "hpet", sizeof(hc->name));
 	hpet_init_clockevent(hc, 50);
 
 	hc->evt.tick_resume = hpet_clkevt_legacy_resume;

arch/x86/kernel/paravirt.c

Lines changed: 2 additions & 1 deletion
@@ -75,7 +75,8 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void __init native_pv_lock_init(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		static_branch_disable(&virt_spin_lock_key);
 }
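The IS_ENABLED() guard keeps the check as ordinary C rather than an #ifdef, so the branch is always parsed and type-checked and the compiler drops it when the option is off. A rough stand-in (MY_IS_ENABLED/MY_CONFIG_FEATURE are made up; the real macro lives in <linux/kconfig.h>):

#include <stdbool.h>
#include <stdio.h>

#define MY_CONFIG_FEATURE 0		/* pretend the Kconfig option is off */
#define MY_IS_ENABLED(opt) (opt)	/* crude stand-in for IS_ENABLED() */

static bool hypervisor_present(void)	/* stand-in for boot_cpu_has(...) */
{
	return false;
}

int main(void)
{
	if (MY_IS_ENABLED(MY_CONFIG_FEATURE) && !hypervisor_present())
		puts("would disable virt_spin_lock_key");	/* compiled, then eliminated */
	else
		puts("leave virt_spin_lock_key alone");
	return 0;
}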

arch/x86/platform/uv/uv_nmi.c

Lines changed: 4 additions & 8 deletions
@@ -202,21 +202,17 @@ static int param_set_action(const char *val, const struct kernel_param *kp)
 {
 	int i;
 	int n = ARRAY_SIZE(valid_acts);
-	char arg[ACTION_LEN], *p;
+	char arg[ACTION_LEN];
 
 	/* (remove possible '\n') */
-	strncpy(arg, val, ACTION_LEN - 1);
-	arg[ACTION_LEN - 1] = '\0';
-	p = strchr(arg, '\n');
-	if (p)
-		*p = '\0';
+	strscpy(arg, val, strnchrnul(val, sizeof(arg)-1, '\n') - val + 1);
 
 	for (i = 0; i < n; i++)
 		if (!strcmp(arg, valid_acts[i].action))
 			break;
 
 	if (i < n) {
-		strcpy(uv_nmi_action, arg);
+		strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action));
 		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
 		return 0;
 	}
@@ -959,7 +955,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 
 	/* Unexpected return, revert action to "dump" */
 	if (master)
-		strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
+		strscpy(uv_nmi_action, "dump", sizeof(uv_nmi_action));
 	}
 
 	/* Pause as all CPU's enter the NMI handler */
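The replacement one-liner folds the old copy/terminate/strip-newline sequence into a single bounded copy: the size argument is the distance to the first newline (or to the end of the bounded range) plus one, so strscpy() copies everything before the newline and terminates it. A userspace sketch with a stand-in helper (strnchrnul_like() is illustrative, not the kernel's strnchrnul(); snprintf stands in for strscpy's bounded, always-terminated copy):

#include <stdio.h>
#include <string.h>

#define ACTION_LEN 16

/* like the kernel's strnchrnul(): first 'c' within n bytes, else end of range */
static const char *strnchrnul_like(const char *s, size_t n, int c)
{
	while (n-- && *s && *s != (char)c)
		s++;
	return s;
}

int main(void)
{
	const char *val = "kdump\n";	/* module-param writes often end in '\n' */
	char arg[ACTION_LEN];
	/* bytes before the newline, plus one for the terminator */
	size_t n = (size_t)(strnchrnul_like(val, sizeof(arg) - 1, '\n') - val) + 1;

	snprintf(arg, n, "%s", val);	/* bounded, terminated copy */
	printf("[%s]\n", arg);		/* prints [kdump] */
	return 0;
}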

arch/x86/purgatory/purgatory.c

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
 #include <crypto/sha2.h>
 #include <asm/purgatory.h>
 
+#include "../boot/compressed/error.h"
 #include "../boot/string.h"
 
 u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");

kernel/locking/qspinlock_paravirt.h

Lines changed: 10 additions & 10 deletions
@@ -485,6 +485,16 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
 }
 
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
+
 /*
  * PV versions of the unlock fastpath and slowpath functions to be used
  * instead of queued_spin_unlock().
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	pv_kick(node->cpu);
 }
 
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
-
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {

kernel/sched/fair.c

Lines changed: 3 additions & 3 deletions
@@ -943,7 +943,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 /**************************************************************
  * Scheduling class statistics methods:
  */
-
+#ifdef CONFIG_SMP
 int sched_update_scaling(void)
 {
 	unsigned int factor = get_update_sysctl_factor();
@@ -956,6 +956,7 @@ int sched_update_scaling(void)
 	return 0;
 }
 #endif
+#endif
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
 
@@ -6335,9 +6336,8 @@ static inline int throttled_lb_pair(struct task_group *tg,
 	return 0;
 }
 
-void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 #endif

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
@@ -1250,6 +1250,7 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 
 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
 			bool fi);
+void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
 /*
  * Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -2421,6 +2422,7 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 #endif
 
 extern void schedule_idle(void);
+asmlinkage void schedule_user(void);
 
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
