Lines matching full:ea

36 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
41 static inline unsigned long mk_esid_data(unsigned long ea, int ssize, in mk_esid_data() argument
44 return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; in mk_esid_data()
54 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, in mk_vsid_data() argument
57 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); in mk_vsid_data()
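
The mk_esid_data()/mk_vsid_data() hits above build the two doublewords that describe one SLB entry. Below is a minimal host-side sketch of the ESID half only, assuming 256MB segments (SID_SHIFT = 28) and using simplified stand-ins for the kernel's SLB_ESID_V and ESID-mask definitions; the VSID half goes through get_kernel_vsid()/get_vsid() and is not reproduced here.

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel definitions (assumption). */
	#define MODEL_SID_SHIFT   28                               /* 256MB segment */
	#define MODEL_ESID_MASK   (~((1UL << MODEL_SID_SHIFT) - 1))
	#define MODEL_SLB_ESID_V  (1UL << 27)                       /* entry-valid bit */

	/* Mirrors mk_esid_data(): keep the ESID bits of the EA, set the valid
	 * bit, and merge the SLB slot index into the low bits. */
	static uint64_t model_esid_data(uint64_t ea, unsigned int index)
	{
		return (ea & MODEL_ESID_MASK) | MODEL_SLB_ESID_V | index;
	}

	int main(void)
	{
		uint64_t ea = 0xc000000001234567UL;    /* example kernel EA */
		printf("esid_data[slot 2] = 0x%016llx\n",
		       (unsigned long long)model_esid_data(ea, 2));
		return 0;
	}
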
71 static void assert_slb_presence(bool present, unsigned long ea) in assert_slb_presence() argument
85 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
86 asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); in assert_slb_presence()
92 static inline void slb_shadow_update(unsigned long ea, int ssize, in slb_shadow_update() argument
104 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
105 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
113 static inline void create_shadowed_slbe(unsigned long ea, int ssize, in create_shadowed_slbe() argument
122 slb_shadow_update(ea, ssize, flags, index); in create_shadowed_slbe()
124 assert_slb_presence(false, ea); in create_shadowed_slbe()
126 : "r" (mk_vsid_data(ea, ssize, flags)), in create_shadowed_slbe()
127 "r" (mk_esid_data(ea, ssize, index)) in create_shadowed_slbe()
292 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
295 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
320 static bool preload_add(struct thread_info *ti, unsigned long ea) in preload_add() argument
327 if (ea & ESID_MASK_1T) in preload_add()
328 ea &= ESID_MASK_1T; in preload_add()
331 esid = ea >> SID_SHIFT; in preload_add()
544 unsigned long ea; in switch_slb() local
547 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
549 slb_allocate_user(mm, ea); in switch_slb()
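
preload_add() and switch_slb() are two halves of the same cache: the first stores ea >> SID_SHIFT as a compact key (rounding 1T EAs down to a 1T boundary first), the second shifts the key back up and replays it through slb_allocate_user(). A round-trip sketch, assuming the usual SID_SHIFT/SID_SHIFT_1T values of 28 and 40:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MODEL_SID_SHIFT     28
	#define MODEL_SID_SHIFT_1T  40
	#define MODEL_ESID_MASK_1T  (~((1UL << MODEL_SID_SHIFT_1T) - 1))

	/* preload_add() direction: EA -> 32-bit preload key. */
	static uint32_t preload_key(uint64_t ea, bool use_1t_segments)
	{
		if (use_1t_segments && (ea & MODEL_ESID_MASK_1T))
			ea &= MODEL_ESID_MASK_1T;    /* round down to 1T boundary */
		return (uint32_t)(ea >> MODEL_SID_SHIFT);
	}

	/* switch_slb() direction: key -> EA handed to slb_allocate_user(). */
	static uint64_t preload_ea(uint32_t key)
	{
		return (uint64_t)key << MODEL_SID_SHIFT;
	}

	int main(void)
	{
		uint64_t ea = 0x00007fffdeadbeefUL;
		uint32_t key = preload_key(ea, false);
		printf("key=0x%x replayed ea=0x%016llx\n",
		       (unsigned int)key, (unsigned long long)preload_ea(key));
		return 0;
	}
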
694 static long slb_insert_entry(unsigned long ea, unsigned long context, in slb_insert_entry() argument
701 vsid = get_vsid(context, ea, ssize); in slb_insert_entry()
719 esid_data = mk_esid_data(ea, ssize, index); in slb_insert_entry()
727 assert_slb_presence(false, ea); in slb_insert_entry()
760 static long slb_allocate_kernel(unsigned long ea, unsigned long id) in slb_allocate_kernel() argument
769 if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) in slb_allocate_kernel()
777 if (ea >= H_VMEMMAP_END) in slb_allocate_kernel()
784 if (ea >= H_VMALLOC_END) in slb_allocate_kernel()
791 if (ea >= H_KERN_IO_END) in slb_allocate_kernel()
804 context = get_kernel_context(ea); in slb_allocate_kernel()
806 return slb_insert_entry(ea, context, flags, ssize, true); in slb_allocate_kernel()
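
slb_allocate_kernel() rejects EAs that fall past the limit of whichever kernel region they claim to be in (linear map, vmemmap, vmalloc, I/O) and only then picks protection flags, segment size and kernel context for the insert. The sketch below models just that bounds-check dispatch; the region limits and EA mask are placeholders, not the real H_* values.

	#include <stdint.h>
	#include <errno.h>
	#include <stdio.h>

	enum model_region { MODEL_LINEAR, MODEL_VMEMMAP, MODEL_VMALLOC, MODEL_IO };

	/* Placeholder limits standing in for (1UL << H_MAX_PHYSMEM_BITS),
	 * H_VMEMMAP_END, H_VMALLOC_END and H_KERN_IO_END (assumption). */
	#define MODEL_EA_MASK  0x0fffffffffffffffUL   /* strip the region nibble */
	static const uint64_t model_region_end[] = {
		[MODEL_LINEAR]  = 1UL << 46,
		[MODEL_VMEMMAP] = 2UL << 46,
		[MODEL_VMALLOC] = 3UL << 46,
		[MODEL_IO]      = 4UL << 46,
	};

	/* Mirrors the shape of slb_allocate_kernel(): an EA past its region's
	 * limit fails with -EFAULT before flags, segment size and context are
	 * chosen for slb_insert_entry(). */
	static long model_allocate_kernel(uint64_t ea, enum model_region id)
	{
		if (id == MODEL_LINEAR) {
			if ((ea & MODEL_EA_MASK) > model_region_end[MODEL_LINEAR])
				return -EFAULT;
		} else if (ea >= model_region_end[id]) {
			return -EFAULT;
		}
		return 0;   /* real code continues to slb_insert_entry(..., true) */
	}

	int main(void)
	{
		printf("linear ok: %ld\n",
		       model_allocate_kernel(0xc000000000100000UL, MODEL_LINEAR));
		printf("vmalloc out of range: %ld\n",
		       model_allocate_kernel(~0UL, MODEL_VMALLOC));
		return 0;
	}
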
809 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) in slb_allocate_user() argument
820 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
823 context = get_user_context(&mm->context, ea); in slb_allocate_user()
827 if (unlikely(ea >= H_PGTABLE_RANGE)) { in slb_allocate_user()
832 ssize = user_segment_size(ea); in slb_allocate_user()
834 bpsize = get_slice_psize(mm, ea); in slb_allocate_user()
837 return slb_insert_entry(ea, context, flags, ssize, false); in slb_allocate_user()
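
On the user side, slb_allocate_user() refuses EAs at or above the task's address-space limit, then derives the context, segment size and base page size before inserting. The sketch below models the EA screening and the segment-size choice, with the 1T threshold (1UL << 40) and the address limit treated as assumptions.

	#include <stdint.h>
	#include <stdbool.h>
	#include <errno.h>
	#include <stdio.h>

	#define MODEL_SID_SHIFT_1T  40    /* 1T segment boundary (assumption) */

	enum model_segsize { MODEL_SEG_256M, MODEL_SEG_1T };

	/* Mirrors user_segment_size(): EAs at or above 1T use 1T segments when
	 * the MMU supports them, otherwise 256MB segments. */
	static enum model_segsize model_user_segment_size(uint64_t ea, bool has_1t)
	{
		if (has_1t && ea >= (1UL << MODEL_SID_SHIFT_1T))
			return MODEL_SEG_1T;
		return MODEL_SEG_256M;
	}

	/* Mirrors the screening in slb_allocate_user(): EAs past the mm's
	 * slb_addr_limit never get an SLB entry. */
	static long model_allocate_user(uint64_t ea, uint64_t addr_limit, bool has_1t)
	{
		if (ea >= addr_limit)
			return -EFAULT;

		enum model_segsize ssize = model_user_segment_size(ea, has_1t);
		printf("ea 0x%016llx -> %s segment\n", (unsigned long long)ea,
		       ssize == MODEL_SEG_1T ? "1T" : "256MB");
		/* real code: get_user_context(), get_slice_psize() for the base
		 * page size, then slb_insert_entry(ea, context, flags, ssize, false) */
		return 0;
	}

	int main(void)
	{
		uint64_t limit = 1UL << 47;    /* placeholder address-space limit */
		printf("%ld\n", model_allocate_user(0x00007fffdeadbeefUL, limit, true));
		printf("%ld\n", model_allocate_user(1UL << 60, limit, true));
		return 0;
	}
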
840 long do_slb_fault(struct pt_regs *regs, unsigned long ea) in do_slb_fault() argument
842 unsigned long id = get_region_id(ea); in do_slb_fault()
872 err = slb_allocate_kernel(ea, id); in do_slb_fault()
884 err = slb_allocate_user(mm, ea); in do_slb_fault()
886 preload_add(current_thread_info(), ea); in do_slb_fault()
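
do_slb_fault() is the dispatcher: it classifies the faulting EA by region, sends kernel-region EAs to slb_allocate_kernel(), and sends user EAs through slb_allocate_user() followed by preload_add() so the segment is replayed on the next context switch. A sketch of that split, with the region test reduced to the kernel quadrant check (an assumption, not the real get_region_id()) and the allocators stubbed out:

	#include <stdint.h>
	#include <stdbool.h>
	#include <errno.h>
	#include <stdio.h>

	/* Stubs standing in for the real allocators and the preload cache. */
	static long stub_allocate_kernel(uint64_t ea) { (void)ea; return 0; }
	static long stub_allocate_user(uint64_t ea)   { (void)ea; return 0; }
	static void stub_preload_add(uint64_t ea)     { (void)ea; }

	/* Rough stand-in for get_region_id(): kernel EAs live in the 0xc...
	 * quadrant on ppc64 (assumption for this sketch). */
	static bool is_kernel_ea(uint64_t ea)
	{
		return ea >= 0xc000000000000000UL;
	}

	/* Mirrors the split in do_slb_fault(): kernel regions go straight to
	 * the kernel allocator; user EAs need an mm, get a user SLB entry,
	 * and on success are remembered in the preload cache. */
	static long model_do_slb_fault(uint64_t ea, bool have_mm)
	{
		long err;

		if (is_kernel_ea(ea))
			return stub_allocate_kernel(ea);

		if (!have_mm)
			return -EFAULT;

		err = stub_allocate_user(ea);
		if (!err)
			stub_preload_add(ea);
		return err;
	}

	int main(void)
	{
		printf("%ld\n", model_do_slb_fault(0xc000000000001000UL, false));
		printf("%ld\n", model_do_slb_fault(0x0000700000000000UL, true));
		return 0;
	}

The do_bad_slb_fault() hits below show the failure path for a returned -EFAULT: SIGSEGV via _exception() in user mode and bad_page_fault() in kernel mode.
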
892 void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err) in do_bad_slb_fault() argument
896 _exception(SIGSEGV, regs, SEGV_BNDERR, ea); in do_bad_slb_fault()
898 bad_page_fault(regs, ea, SIGSEGV); in do_bad_slb_fault()