Lines matching the identifier "ea" (effective address) in the powerpc SLB code, arch/powerpc/mm/book3s64/slb.c: the SLB miss, preload and dump paths.

32 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
45 static void assert_slb_presence(bool present, unsigned long ea) in assert_slb_presence() argument
59 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
60 asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); in assert_slb_presence()
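
assert_slb_presence() probes the SLB with __PPC_SLBFEE_DOT, a wrapper around the slbfee. (SLB find entry ESID) instruction, which wants a segment-aligned EA. A minimal standalone sketch of the masking at line 59, assuming the kernel's SID_SHIFT of 28 for 256MB segments:

#include <stdio.h>

#define SID_SHIFT 28	/* 256MB segment: ESID is EA >> 28 */

int main(void)
{
	unsigned long ea = 0xc000000012345678UL;

	/* Clear the low 28 bits, rounding the EA down to its segment
	 * base, the form slbfee. expects as input. */
	ea &= ~((1UL << SID_SHIFT) - 1);
	printf("0x%lx\n", ea);		/* prints 0xc000000010000000 */
	return 0;
}
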
66 static inline void slb_shadow_update(unsigned long ea, int ssize, in slb_shadow_update() argument
78 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
79 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
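
slb_shadow_update() rewrites one entry of the shadow buffer the hypervisor uses to restore the SLB. The real function also clears the ESID before the two stores shown above (that line mentions no "ea", so the search omits it). A minimal ordering sketch with simplified types, plain stores standing in for WRITE_ONCE():

#include <stdint.h>

struct shadow_entry { uint64_t esid, vsid; };

static void shadow_update(volatile struct shadow_entry *e,
			  uint64_t new_esid, uint64_t new_vsid)
{
	e->esid = 0;		/* step 1: mark the entry invalid */
	e->vsid = new_vsid;	/* step 2: install the new VSID */
	e->esid = new_esid;	/* step 3: re-validate with the new ESID */
}

Because the ESID is zeroed first, a reader that catches the update mid-flight never pairs a stale ESID with the new VSID.
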
87 static inline void create_shadowed_slbe(unsigned long ea, int ssize, in create_shadowed_slbe() argument
96 slb_shadow_update(ea, ssize, flags, index); in create_shadowed_slbe()
98 assert_slb_presence(false, ea); in create_shadowed_slbe()
100 : "r" (mk_vsid_data(ea, ssize, flags)), in create_shadowed_slbe()
101 "r" (mk_esid_data(ea, ssize, index)) in create_shadowed_slbe()
268 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
271 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
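
The two dump lines print the PACA's SLB cache, which holds user EAs already shifted right by SID_SHIFT, i.e. EA bits 0-35 in IBM bit numbering (the most significant 36 bits). A sketch of how a cached value maps to the printed field:

#include <stdio.h>

#define SID_SHIFT 28

int main(void)
{
	unsigned long ea = 0x00007fff12345678UL;	/* user EA */
	unsigned int cached = ea >> SID_SHIFT;		/* EA bits 0-35 */

	/* Same format string as the dump above */
	printf("%02d EA[0-35]=%9x\n", 0, cached);	/* 00 EA[0-35]=    7fff1 */
	return 0;
}
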
297 static bool preload_add(struct thread_info *ti, unsigned long ea) in preload_add() argument
304 if (ea & ESID_MASK_1T) in preload_add()
305 ea &= ESID_MASK_1T; in preload_add()
308 esid = ea >> SID_SHIFT; in preload_add()
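
preload_add() records the ESID of a freshly faulted segment so switch_slb() can reinstall it later. Entries are stored as ea >> SID_SHIFT, so 256MB segments need no extra masking; a 1T EA is first rounded down to its 1T boundary so every address in the segment collapses to one slot. A standalone sketch, assuming the kernel's SID_SHIFT_1T of 40:

#include <stdio.h>

#define SID_SHIFT	28
#define SID_SHIFT_1T	40
#define ESID_MASK_1T	(~((1UL << SID_SHIFT_1T) - 1))

int main(void)
{
	unsigned long ea = 0x00007fff12345678UL;

	if (ea & ESID_MASK_1T)		/* EA has a non-zero 1T ESID */
		ea &= ESID_MASK_1T;	/* round down to the 1T boundary */
	printf("stored: 0x%lx\n", ea >> SID_SHIFT);	/* 0x7f000 */
	return 0;
}
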
521 unsigned long ea; in switch_slb() local
524 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
526 slb_allocate_user(mm, ea); in switch_slb()
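
On context switch the stored ESIDs are shifted back left to rebuild segment-base EAs for slb_allocate_user(). A round-trip sketch of the two shifts (the u32 slot type mirrors the slb_preload_esid field in thread_info):

#include <assert.h>
#include <stdint.h>

#define SID_SHIFT 28

int main(void)
{
	unsigned long ea = 0x00007fff12345678UL;
	uint32_t slot = ea >> SID_SHIFT;	/* stored by preload_add() */
	unsigned long base = (unsigned long)slot << SID_SHIFT;	/* rebuilt in switch_slb() */

	/* The rebuilt EA is the original rounded to its segment base. */
	assert(base == (ea & ~((1UL << SID_SHIFT) - 1)));
	return 0;
}
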
671 static long slb_insert_entry(unsigned long ea, unsigned long context, in slb_insert_entry() argument
678 vsid = get_vsid(context, ea, ssize); in slb_insert_entry()
696 esid_data = mk_esid_data(ea, ssize, index); in slb_insert_entry()
704 assert_slb_presence(false, ea); in slb_insert_entry()
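
slb_insert_entry() turns (context, ea) into the two halves of an SLB entry: get_vsid() hashes the context and ESID into a VSID, which is then shifted above the flags field (12 bits for 256MB segments) to form the vsid_data word. A hedged sketch; fake_get_vsid() is a stand-in mix, not the kernel's real VSID scramble, and the segment-size bits are omitted:

#include <stdio.h>

#define SID_SHIFT	28
#define SLB_VSID_SHIFT	12	/* 256MB: flags occupy the low 12 bits */

/* Stand-in for get_vsid(): the kernel runs (context, esid) through a
 * multiplicative hash; any deterministic mix serves the illustration. */
static unsigned long fake_get_vsid(unsigned long context, unsigned long ea)
{
	return (context << 25) | (ea >> SID_SHIFT);
}

int main(void)
{
	unsigned long flags = 0x490;	/* hypothetical SLB_VSID_* bits */
	unsigned long vsid = fake_get_vsid(1, 0x00007fff12345678UL);

	printf("vsid_data: 0x%lx\n", (vsid << SLB_VSID_SHIFT) | flags);
	return 0;
}
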
737 static long slb_allocate_kernel(unsigned long ea, unsigned long id) in slb_allocate_kernel() argument
746 if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) in slb_allocate_kernel()
754 if (ea >= H_VMEMMAP_END) in slb_allocate_kernel()
761 if (ea >= H_VMALLOC_END) in slb_allocate_kernel()
768 if (ea >= H_KERN_IO_END) in slb_allocate_kernel()
781 context = get_kernel_context(ea); in slb_allocate_kernel()
783 return slb_insert_entry(ea, context, flags, ssize, true); in slb_allocate_kernel()
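
slb_allocate_kernel() bound-checks the EA against the end of whichever kernel region it falls in (linear map, vmemmap, vmalloc, IO) before choosing flags and inserting. A condensed sketch of the pattern; EA_MASK and the H_* limits are config-dependent in the kernel, so the values here are placeholders only:

#include <errno.h>

#define EA_MASK			0x0fffffffffffffffUL	/* placeholder */
#define H_MAX_PHYSMEM_BITS	51			/* example config */
#define H_VMALLOC_END		0xd000100000000000UL	/* placeholder */

static long linear_map_check(unsigned long ea)
{
	/* Line 746: the offset into the linear map must fit within the
	 * supported physical address space. */
	if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS))
		return -EFAULT;
	return 0;
}

static long vmalloc_check(unsigned long ea)
{
	/* Lines 754, 761 and 768 repeat this per-region bound check. */
	if (ea >= H_VMALLOC_END)
		return -EFAULT;
	return 0;
}
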
786 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) in slb_allocate_user() argument
797 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
800 context = get_user_context(&mm->context, ea); in slb_allocate_user()
804 if (unlikely(ea >= H_PGTABLE_RANGE)) { in slb_allocate_user()
809 ssize = user_segment_size(ea); in slb_allocate_user()
811 bpsize = get_slice_psize(mm, ea); in slb_allocate_user()
814 return slb_insert_entry(ea, context, flags, ssize, false); in slb_allocate_user()
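
The user path bound-checks the EA against the task's SLB address limit, derives the context, then picks the segment size and the slice's page-size flags. A sketch of the 1T/256MB decision behind user_segment_size() at line 809, with the MMU feature test reduced to a boolean:

#include <stdbool.h>

#define SID_SHIFT_1T	40

enum segsize { SEGSIZE_256M, SEGSIZE_1T };

/* Addresses at or above 1T use 1T segments when the MMU offers them;
 * everything below stays a 256MB segment. */
static enum segsize segment_size(unsigned long ea, bool have_1t_segments)
{
	if (have_1t_segments && ea >= (1UL << SID_SHIFT_1T))
		return SEGSIZE_1T;
	return SEGSIZE_256M;
}
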
819 unsigned long ea = regs->dar; in DEFINE_INTERRUPT_HANDLER_RAW() local
820 unsigned long id = get_region_id(ea); in DEFINE_INTERRUPT_HANDLER_RAW()
852 err = slb_allocate_kernel(ea, id); in DEFINE_INTERRUPT_HANDLER_RAW()
864 err = slb_allocate_user(mm, ea); in DEFINE_INTERRUPT_HANDLER_RAW()
866 preload_add(current_thread_info(), ea); in DEFINE_INTERRUPT_HANDLER_RAW()
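
The raw fault handler ties the paths together: the faulting EA comes from DAR, its region id selects the kernel or user allocator, and a successful user fault is remembered with preload_add() so the segment is reinstalled at the next context switch. A dispatch sketch; the region extraction and the stand-in allocators are illustrative (user EAs have region id 0 on book3s64):

#include <errno.h>

#define USER_REGION_ID	0

/* Stand-ins for the allocators in the listing above. */
static long alloc_kernel(unsigned long ea, unsigned long id) { return 0; }
static long alloc_user(unsigned long ea)                     { return 0; }
static void preload(unsigned long ea)                        { }

static long slb_fault_sketch(unsigned long dar)
{
	unsigned long id = dar >> 60;	/* get_region_id() stand-in */
	long err;

	if (id != USER_REGION_ID)
		return alloc_kernel(dar, id);

	err = alloc_user(dar);
	if (!err)
		preload(dar);	/* remembered for switch_slb() */
	return err;
}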