Lines matching full:ea (identifier search over the powerpc Book3S-64 SLB management code, arch/powerpc/mm/book3s64/slb.c in the Linux tree). Each result carries its line number in that file; the trailing "in func()" annotation names the enclosing function.

31 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
44 static void assert_slb_presence(bool present, unsigned long ea) in assert_slb_presence() argument
58 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
59 asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); in assert_slb_presence()
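
The two matches above are the core of assert_slb_presence(): the slbfee. probe ignores (and partly requires clear) the low-order bits of its RB operand, so the EA is first truncated to its segment boundary. A minimal user-space sketch of that alignment, assuming the 256MB segment shift (SID_SHIFT = 28) from the Book3S-64 headers:

	#include <stdio.h>

	#define SID_SHIFT 28	/* 256MB segment shift on Book3S-64 */

	int main(void)
	{
		unsigned long ea = 0xc000000012345678UL;

		/* clear everything below the segment boundary, as line 58 does */
		ea &= ~((1UL << SID_SHIFT) - 1);
		printf("probe EA: 0x%lx\n", ea);	/* 0xc000000010000000 */
		return 0;
	}
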
65 static inline void slb_shadow_update(unsigned long ea, int ssize, in slb_shadow_update() argument
77 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
78 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
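
Both stores above go through WRITE_ONCE() because the hypervisor may read the shadow buffer at any time. In the surrounding (non-matching) kernel lines the ESID is cleared before the VSID is touched, so a reader never pairs a valid ESID with a stale VSID. A minimal user-space model of that publish ordering; the entry layout and the *_data values are stand-ins, not the kernel definitions:

	#include <stdint.h>
	#include <stdio.h>

	struct shadow_entry { volatile uint64_t esid, vsid; };

	static void shadow_update(struct shadow_entry *e,
				  uint64_t vsid_data, uint64_t esid_data)
	{
		e->esid = 0;		/* invalidate before touching the VSID */
		e->vsid = vsid_data;	/* a reader now skips this entry... */
		e->esid = esid_data;	/* ...until the valid ESID is republished */
	}

	int main(void)
	{
		struct shadow_entry e = { 0, 0 };

		shadow_update(&e, 0xabcde490UL, 0xc000000018000001UL);
		printf("esid=0x%lx vsid=0x%lx\n",
		       (unsigned long)e.esid, (unsigned long)e.vsid);
		return 0;
	}
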
86 static inline void create_shadowed_slbe(unsigned long ea, int ssize, in create_shadowed_slbe() argument
95 slb_shadow_update(ea, ssize, flags, index); in create_shadowed_slbe()
97 assert_slb_presence(false, ea); in create_shadowed_slbe()
99 : "r" (mk_vsid_data(ea, ssize, flags)), in create_shadowed_slbe()
100 "r" (mk_esid_data(ea, ssize, index)) in create_shadowed_slbe()
267 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
270 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
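
"EA[0-35]" in the dump format is IBM (MSB = 0) bit numbering for the top 36 bits of the effective address: the SLB cache stores EAs shifted right by SID_SHIFT, so each cached u32 is an ESID. Recovering the segment base from a cached value (segment_base() is a hypothetical helper, not in the kernel):

	#include <stdio.h>

	#define SID_SHIFT 28

	/* hypothetical helper: undo the ">> SID_SHIFT" applied at cache fill */
	static unsigned long segment_base(unsigned int cached)
	{
		return (unsigned long)cached << SID_SHIFT;
	}

	int main(void)
	{
		printf("0x%lx\n", segment_base(0x7fff000)); /* 0x7fff0000000000 */
		return 0;
	}
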
296 static bool preload_add(struct thread_info *ti, unsigned long ea) in preload_add() argument
303 if (ea & ESID_MASK_1T) in preload_add()
304 ea &= ESID_MASK_1T; in preload_add()
307 esid = ea >> SID_SHIFT; in preload_add()
520 unsigned long ea; in switch_slb() local
523 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
525 slb_allocate_user(mm, ea); in switch_slb()
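
preload_add() and switch_slb() are two halves of a round trip: the preload cache stores ea >> SID_SHIFT (after rounding 1T-segment EAs down to their 1T boundary, lines 303-304), and switch_slb() shifts the stored ESID back up before handing it to slb_allocate_user(). A user-space sketch, with SID_SHIFT and ESID_MASK_1T mirroring the Book3S-64 headers:

	#include <stdio.h>

	#define SID_SHIFT	28
	#define ESID_MASK_1T	(~((1UL << 40) - 1))	/* 1T segment mask */

	int main(void)
	{
		unsigned long ea = 0x7fff12345678UL;
		unsigned int esid;

		if (ea & ESID_MASK_1T)	/* EA above the first 1T segment */
			ea &= ESID_MASK_1T;
		esid = ea >> SID_SHIFT;	/* what preload_add() caches */

		/* what switch_slb() hands back to slb_allocate_user() */
		printf("reload EA: 0x%lx\n",
		       (unsigned long)esid << SID_SHIFT);	/* 0x7f0000000000 */
		return 0;
	}
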
670 static long slb_insert_entry(unsigned long ea, unsigned long context, in slb_insert_entry() argument
677 vsid = get_vsid(context, ea, ssize); in slb_insert_entry()
695 esid_data = mk_esid_data(ea, ssize, index); in slb_insert_entry()
703 assert_slb_presence(false, ea); in slb_insert_entry()
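
slb_insert_entry() derives the VSID for the EA, builds the two doublewords sketched above, and writes them with slbmte; the assert_slb_presence(false, ea) on line 703 guards against inserting a duplicate, which would raise an SLB multi-hit machine check. A much-simplified user-space model of the sequence; the array, cursor and values are stand-ins, and on hardware the final store is the slbmte instruction, which needs no isync here because exception entry and the exiting rfid are already context-synchronizing:

	#include <stdio.h>

	struct slbe { unsigned long vsid_data, esid_data; };
	static struct slbe slb[32];	/* toy model of a 32-entry SLB */
	static int stab_rr;		/* round-robin cursor, like alloc_slb_index() */

	int main(void)
	{
		unsigned long vsid_data = 0xabcde490UL;		/* illustrative */
		unsigned long esid_data = 0xc000000018000001UL;	/* illustrative */
		int i, index = stab_rr++ % 32;

		/* assert_slb_presence(false, ea): the entry must not already
		 * exist anywhere (the kernel probes with slbfee.; the model
		 * just scans the array). */
		for (i = 0; i < 32; i++)
			if (slb[i].esid_data == esid_data)
				return 1;

		slb[index] = (struct slbe){ vsid_data, esid_data }; /* "slbmte" */
		printf("inserted at slot %d\n", index);
		return 0;
	}
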
736 static long slb_allocate_kernel(unsigned long ea, unsigned long id) in slb_allocate_kernel() argument
745 if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) in slb_allocate_kernel()
753 if (ea >= H_VMEMMAP_END) in slb_allocate_kernel()
760 if (ea >= H_VMALLOC_END) in slb_allocate_kernel()
767 if (ea >= H_KERN_IO_END) in slb_allocate_kernel()
780 context = get_kernel_context(ea); in slb_allocate_kernel()
782 return slb_insert_entry(ea, context, flags, ssize, true); in slb_allocate_kernel()
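
slb_allocate_kernel() is a bounds-check cascade: the region id picks the limit (the linear map instead checks against H_MAX_PHYSMEM_BITS, line 745) and the SLB flags/page size for the entry, then get_kernel_context() supplies the context and slb_insert_entry() finishes the job. A user-space sketch of the cascade's shape; the ids and limits are stand-ins, not the kernel's H_* constants:

	#include <errno.h>
	#include <stdio.h>

	/* stand-in region ids and limits, not the kernel's values */
	enum { VMEMMAP_ID = 1, VMALLOC_ID, IO_ID };
	#define VMEMMAP_END	0x400UL
	#define VMALLOC_END	0x300UL
	#define KERN_IO_END	0x200UL

	static long allocate_kernel(unsigned long ea, unsigned long id)
	{
		if (id == VMEMMAP_ID && ea >= VMEMMAP_END)
			return -EFAULT;
		if (id == VMALLOC_ID && ea >= VMALLOC_END)
			return -EFAULT;
		if (id == IO_ID && ea >= KERN_IO_END)
			return -EFAULT;
		/* ...select per-region flags, then insert the entry */
		return 0;
	}

	int main(void)
	{
		printf("%ld\n", allocate_kernel(0x250UL, IO_ID)); /* -14 (EFAULT) */
		printf("%ld\n", allocate_kernel(0x150UL, IO_ID)); /* 0 */
		return 0;
	}
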
785 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) in slb_allocate_user() argument
796 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
799 context = get_user_context(&mm->context, ea); in slb_allocate_user()
803 if (unlikely(ea >= H_PGTABLE_RANGE)) { in slb_allocate_user()
808 ssize = user_segment_size(ea); in slb_allocate_user()
810 bpsize = get_slice_psize(mm, ea); in slb_allocate_user()
813 return slb_insert_entry(ea, context, flags, ssize, false); in slb_allocate_user()
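
On the user side the EA is checked against the mm's address-space limit and H_PGTABLE_RANGE, then the segment size and the slice's base page size determine the flags. The segment-size rule itself is small: EAs at or above 1T use 1T segments when the MMU supports them, everything below stays on 256MB segments. A user-space sketch of that rule; have_1t stands in for the MMU feature check inside user_segment_size():

	#include <stdio.h>

	#define SID_SHIFT_1T 40	/* 1T segment shift */

	static const char *segment_size(unsigned long ea, int have_1t)
	{
		if (ea >= (1UL << SID_SHIFT_1T) && have_1t)
			return "1T";
		return "256M";
	}

	int main(void)
	{
		printf("%s\n", segment_size(0x10000000000UL, 1)); /* 1T */
		printf("%s\n", segment_size(0x00fff00000UL, 1));  /* 256M */
		return 0;
	}
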
818 unsigned long ea = regs->dar; in DEFINE_INTERRUPT_HANDLER_RAW() local
819 unsigned long id = get_region_id(ea); in DEFINE_INTERRUPT_HANDLER_RAW()
851 err = slb_allocate_kernel(ea, id); in DEFINE_INTERRUPT_HANDLER_RAW()
863 err = slb_allocate_user(mm, ea); in DEFINE_INTERRUPT_HANDLER_RAW()
865 preload_add(current_thread_info(), ea); in DEFINE_INTERRUPT_HANDLER_RAW()
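
The raw interrupt handler dispatches on the region id of regs->dar: kernel region ids go to slb_allocate_kernel(), user addresses to slb_allocate_user(), after which preload_add() remembers the EA so later context switches can preload it. A simplified user-space model of the dispatch; region_id() here only inspects the top nibble, while the real get_region_id() validates more, and the handler's MSR checks and error paths are omitted:

	#include <stdio.h>

	#define USER_REGION_ID 0	/* top nibble 0 => user address */

	/* simplified: the real get_region_id() validates much more */
	static int region_id(unsigned long ea)
	{
		return (int)(ea >> 60);
	}

	int main(void)
	{
		unsigned long ea = 0xc000000001234567UL; /* like regs->dar */

		if (region_id(ea) != USER_REGION_ID)
			printf("-> slb_allocate_kernel(ea, id)\n");
		else
			printf("-> slb_allocate_user(mm, ea); preload_add(ti, ea)\n");
		return 0;
	}
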