Skip to content

Commit 4d349ee

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:

 "The diffstat is dominated by changes to our TLB invalidation errata
  handling and the introduction of a new GCS selftest to catch one of
  the issues that is fixed here relating to PROT_NONE mappings.

   - Fix cpufreq warning due to attempting a cross-call with interrupts
     masked when reading local AMU counters

   - Fix DEBUG_PREEMPT warning from the delay loop when it tries to
     access per-cpu errata workaround state for the virtual counter

   - Re-jig and optimise our TLB invalidation errata workarounds in
     preparation for more hardware brokenness

   - Fix GCS mappings to interact properly with PROT_NONE and to avoid
     corrupting the pte on CPUs with FEAT_LPA2

   - Fix ioremap_prot() to extract only the memory attributes from the
     user pte and ignore all the other 'prot' bits"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: topology: Fix false warning in counters_read_on_cpu() for same-CPU reads
  arm64: Fix sampling the "stable" virtual counter in preemptible section
  arm64: tlb: Optimize ARM64_WORKAROUND_REPEAT_TLBI
  arm64: tlb: Allow XZR argument to TLBI ops
  kselftest: arm64: Check access to GCS after mprotect(PROT_NONE)
  arm64: gcs: Honour mprotect(PROT_NONE) on shadow stack mappings
  arm64: gcs: Do not set PTE_SHARED on GCS mappings if FEAT_LPA2 is enabled
  arm64: io: Extract user memory type in ioremap_prot()
  arm64: io: Rename ioremap_prot() to __ioremap_prot()
2 parents 1c63df2 + df6e4ab commit 4d349ee

File tree

14 files changed

+179
-60
lines changed

14 files changed

+179
-60
lines changed

arch/arm64/include/asm/io.h

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -264,19 +264,33 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
264264
typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
265265
pgprot_t *prot);
266266
int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
267+
void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
267268

268-
#define ioremap_prot ioremap_prot
269+
static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
270+
pgprot_t user_prot)
271+
{
272+
pgprot_t prot;
273+
ptdesc_t user_prot_val = pgprot_val(user_prot);
274+
275+
if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
276+
return NULL;
269277

270-
#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
278+
prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
279+
user_prot_val & PTE_ATTRINDX_MASK);
280+
return __ioremap_prot(phys, size, prot);
281+
}
282+
#define ioremap_prot ioremap_prot
271283

284+
#define ioremap(addr, size) \
285+
__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
272286
#define ioremap_wc(addr, size) \
273-
ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
287+
__ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
274288
#define ioremap_np(addr, size) \
275-
ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
289+
__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
276290

277291

278292
#define ioremap_encrypted(addr, size) \
279-
ioremap_prot((addr), (size), PAGE_KERNEL)
293+
__ioremap_prot((addr), (size), PAGE_KERNEL)
280294

281295
/*
282296
* io{read,write}{16,32,64}be() macros
@@ -297,7 +311,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
297311
if (pfn_is_map_memory(__phys_to_pfn(addr)))
298312
return (void __iomem *)__phys_to_virt(addr);
299313

300-
return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
314+
return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
301315
}
302316

303317
/*

arch/arm64/include/asm/pgtable-prot.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -164,9 +164,6 @@ static inline bool __pure lpa2_is_enabled(void)
164164
#define _PAGE_GCS (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_WRITE | PTE_USER)
165165
#define _PAGE_GCS_RO (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_USER)
166166

167-
#define PAGE_GCS __pgprot(_PAGE_GCS)
168-
#define PAGE_GCS_RO __pgprot(_PAGE_GCS_RO)
169-
170167
#define PIE_E0 ( \
171168
PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
172169
PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \

arch/arm64/include/asm/tlbflush.h

Lines changed: 37 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -31,19 +31,11 @@
3131
*/
3232
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
3333
"tlbi " #op "\n" \
34-
ALTERNATIVE("nop\n nop", \
35-
"dsb ish\n tlbi " #op, \
36-
ARM64_WORKAROUND_REPEAT_TLBI, \
37-
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
3834
: : )
3935

4036
#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
41-
"tlbi " #op ", %0\n" \
42-
ALTERNATIVE("nop\n nop", \
43-
"dsb ish\n tlbi " #op ", %0", \
44-
ARM64_WORKAROUND_REPEAT_TLBI, \
45-
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
46-
: : "r" (arg))
37+
"tlbi " #op ", %x0\n" \
38+
: : "rZ" (arg))
4739

4840
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
4941

@@ -181,6 +173,34 @@ static inline unsigned long get_trans_granule(void)
181173
(__pages >> (5 * (scale) + 1)) - 1; \
182174
})
183175

176+
#define __repeat_tlbi_sync(op, arg...) \
177+
do { \
178+
if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI)) \
179+
break; \
180+
__tlbi(op, ##arg); \
181+
dsb(ish); \
182+
} while (0)
183+
184+
/*
185+
* Complete broadcast TLB maintenance issued by the host which invalidates
186+
* stage 1 information in the host's own translation regime.
187+
*/
188+
static inline void __tlbi_sync_s1ish(void)
189+
{
190+
dsb(ish);
191+
__repeat_tlbi_sync(vale1is, 0);
192+
}
193+
194+
/*
195+
* Complete broadcast TLB maintenance issued by hyp code which invalidates
196+
* stage 1 translation information in any translation regime.
197+
*/
198+
static inline void __tlbi_sync_s1ish_hyp(void)
199+
{
200+
dsb(ish);
201+
__repeat_tlbi_sync(vale2is, 0);
202+
}
203+
184204
/*
185205
* TLB Invalidation
186206
* ================
@@ -279,7 +299,7 @@ static inline void flush_tlb_all(void)
279299
{
280300
dsb(ishst);
281301
__tlbi(vmalle1is);
282-
dsb(ish);
302+
__tlbi_sync_s1ish();
283303
isb();
284304
}
285305

@@ -291,7 +311,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
291311
asid = __TLBI_VADDR(0, ASID(mm));
292312
__tlbi(aside1is, asid);
293313
__tlbi_user(aside1is, asid);
294-
dsb(ish);
314+
__tlbi_sync_s1ish();
295315
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
296316
}
297317

@@ -345,20 +365,11 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
345365
unsigned long uaddr)
346366
{
347367
flush_tlb_page_nosync(vma, uaddr);
348-
dsb(ish);
368+
__tlbi_sync_s1ish();
349369
}
350370

351371
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
352372
{
353-
/*
354-
* TLB flush deferral is not required on systems which are affected by
355-
* ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
356-
* will have two consecutive TLBI instructions with a dsb(ish) in between
357-
* defeating the purpose (i.e save overall 'dsb ish' cost).
358-
*/
359-
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
360-
return false;
361-
362373
return true;
363374
}
364375

@@ -374,7 +385,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
374385
*/
375386
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
376387
{
377-
dsb(ish);
388+
__tlbi_sync_s1ish();
378389
}
379390

380391
/*
@@ -509,7 +520,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
509520
{
510521
__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
511522
last_level, tlb_level);
512-
dsb(ish);
523+
__tlbi_sync_s1ish();
513524
}
514525

515526
static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
@@ -557,7 +568,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
557568
dsb(ishst);
558569
__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
559570
TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
560-
dsb(ish);
571+
__tlbi_sync_s1ish();
561572
isb();
562573
}
563574

@@ -571,7 +582,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
571582

572583
dsb(ishst);
573584
__tlbi(vaae1is, addr);
574-
dsb(ish);
585+
__tlbi_sync_s1ish();
575586
isb();
576587
}
577588

arch/arm64/kernel/acpi.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -377,7 +377,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
377377
prot = __acpi_get_writethrough_mem_attribute();
378378
}
379379
}
380-
return ioremap_prot(phys, size, prot);
380+
return __ioremap_prot(phys, size, prot);
381381
}
382382

383383
/*

arch/arm64/kernel/sys_compat.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
3737
* We pick the reserved-ASID to minimise the impact.
3838
*/
3939
__tlbi(aside1is, __TLBI_VADDR(0, 0));
40-
dsb(ish);
40+
__tlbi_sync_s1ish();
4141
}
4242

4343
ret = caches_clean_inval_user_pou(start, start + chunk);

arch/arm64/kernel/topology.c

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -400,16 +400,25 @@ static inline
400400
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
401401
{
402402
/*
403-
* Abort call on counterless CPU or when interrupts are
404-
* disabled - can lead to deadlock in smp sync call.
403+
* Abort call on counterless CPU.
405404
*/
406405
if (!cpu_has_amu_feat(cpu))
407406
return -EOPNOTSUPP;
408407

409-
if (WARN_ON_ONCE(irqs_disabled()))
410-
return -EPERM;
411-
412-
smp_call_function_single(cpu, func, val, 1);
408+
if (irqs_disabled()) {
409+
/*
410+
* When IRQs are disabled (tick path: sched_tick ->
411+
* topology_scale_freq_tick or cppc_scale_freq_tick), only local
412+
* CPU counter reads are allowed. Remote CPU counter read would
413+
* require smp_call_function_single() which is unsafe with IRQs
414+
* disabled.
415+
*/
416+
if (WARN_ON_ONCE(cpu != smp_processor_id()))
417+
return -EPERM;
418+
func(val);
419+
} else {
420+
smp_call_function_single(cpu, func, val, 1);
421+
}
413422

414423
return 0;
415424
}

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
271271
*/
272272
dsb(ishst);
273273
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
274-
dsb(ish);
274+
__tlbi_sync_s1ish_hyp();
275275
isb();
276276
}
277277

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
169169
*/
170170
dsb(ish);
171171
__tlbi(vmalle1is);
172-
dsb(ish);
172+
__tlbi_sync_s1ish_hyp();
173173
isb();
174174

175175
exit_vmid_context(&cxt);
@@ -226,7 +226,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
226226

227227
dsb(ish);
228228
__tlbi(vmalle1is);
229-
dsb(ish);
229+
__tlbi_sync_s1ish_hyp();
230230
isb();
231231

232232
exit_vmid_context(&cxt);
@@ -240,7 +240,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
240240
enter_vmid_context(mmu, &cxt, false);
241241

242242
__tlbi(vmalls12e1is);
243-
dsb(ish);
243+
__tlbi_sync_s1ish_hyp();
244244
isb();
245245

246246
exit_vmid_context(&cxt);
@@ -266,5 +266,5 @@ void __kvm_flush_vm_context(void)
266266
/* Same remark as in enter_vmid_context() */
267267
dsb(ish);
268268
__tlbi(alle1is);
269-
dsb(ish);
269+
__tlbi_sync_s1ish_hyp();
270270
}

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -501,7 +501,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
501501
*unmapped += granule;
502502
}
503503

504-
dsb(ish);
504+
__tlbi_sync_s1ish_hyp();
505505
isb();
506506
mm_ops->put_page(ctx->ptep);
507507

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
115115
*/
116116
dsb(ish);
117117
__tlbi(vmalle1is);
118-
dsb(ish);
118+
__tlbi_sync_s1ish_hyp();
119119
isb();
120120

121121
exit_vmid_context(&cxt);
@@ -176,7 +176,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
176176

177177
dsb(ish);
178178
__tlbi(vmalle1is);
179-
dsb(ish);
179+
__tlbi_sync_s1ish_hyp();
180180
isb();
181181

182182
exit_vmid_context(&cxt);
@@ -192,7 +192,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
192192
enter_vmid_context(mmu, &cxt);
193193

194194
__tlbi(vmalls12e1is);
195-
dsb(ish);
195+
__tlbi_sync_s1ish_hyp();
196196
isb();
197197

198198
exit_vmid_context(&cxt);
@@ -217,7 +217,7 @@ void __kvm_flush_vm_context(void)
217217
{
218218
dsb(ishst);
219219
__tlbi(alle1is);
220-
dsb(ish);
220+
__tlbi_sync_s1ish_hyp();
221221
}
222222

223223
/*
@@ -358,7 +358,7 @@ int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
358358
default:
359359
ret = -EINVAL;
360360
}
361-
dsb(ish);
361+
__tlbi_sync_s1ish_hyp();
362362
isb();
363363

364364
if (mmu)

0 commit comments

Comments (0)