/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M       - The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is
 *		  physically contiguous (although various IO spaces may punch
 *		  holes in it).
 *
 *	N       - Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have the low bit set to 1, C/MBricks have the
 *		  low bit equal to 0. Most addressing macros that target UV hub
 *		  chips right shift the NASID by 1 to exclude the always-zero
 *		  bit. NASIDs contain up to 15 bits.
 *
 *	GNODE   - NASID right shifted by 1 bit. Most MMRs contain gnodes
 *		  instead of nasids.
 *
 *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *	GPA     - (global physical address) a socket physical address converted
 *		  so that it can be used by the GRU as a global address. Socket
 *		  physical addresses 1) need additional NASID (node) bits added
 *		  to the high end of the address, and 2) need to be unaliased if
 *		  the partition does not have a physical address 0. In addition,
 *		  on UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------+------------------------+---------------------+
 *  |00..000 |         GNODE          |      NodeOffset     |
 *  +--------+------------------------+---------------------+
 *           |<----- 53 - M bits ---->|<----- M bits ------>|
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   |<-- N bits --->|<----- M bits ------>|
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *		Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *		The actual values are configuration dependent and are set at
 *		boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *		1111110000000000
 *		5432109876543210
 *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *		sssssssssss
 *
 *			p = pnode bits
 *			l = socket number on board
 *			c = core
 *			h = hyperthread
 *			s = bits that are in the SOCKET_ID CSR
 *
 *	Note: Processor may support fewer bits in the APICID register. The ACPI
 *	      tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
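
/*
 * Illustrative sketch: given the caveat above, derive topology from an
 * APICID via the helpers declared later in this file rather than decoding
 * the bit layout by hand. The variable names below are hypothetical; the
 * helpers are the ones defined further down in this header.
 *
 *	int apicid = per_cpu(x86_cpu_to_apicid, cpu);
 *	int pnode  = uv_apicid_to_pnode(apicid);
 *	int socket = uv_apicid_to_socket(apicid);
 */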

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/* System Controller Interface Reg info */
struct uv_scir_s {
	struct timer_list timer;
	unsigned long	offset;
	unsigned long	last;
	unsigned long	idle_on;
	unsigned long	idle_off;
	unsigned char	state;
	unsigned char	enabled;
};

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
	u32	limit;		/* PA bits 56:26 (GAM_RANGE_SHFT) */
	u16	nasid;		/* node's global physical address */
	s8	base;		/* entry index of node's base addr */
	u8	reserved;
};

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
	unsigned long	global_mmr_base;
	unsigned long	global_mmr_shift;
	unsigned long	gpa_mask;
	unsigned short	*socket_to_node;
	unsigned short	*socket_to_pnode;
	unsigned short	*pnode_to_socket;
	struct uv_gam_range_s *gr_table;
	unsigned short	min_socket;
	unsigned short	min_pnode;
	unsigned char	m_val;
	unsigned char	n_val;
	unsigned char	gr_table_len;
	unsigned char	hub_revision;
	unsigned char	apic_pnode_shift;
	unsigned char	gpa_shift;
	unsigned char	m_shift;
	unsigned char	n_lshift;
	unsigned int	gnode_extra;
	unsigned long	gnode_upper;
	unsigned long	lowmem_remap_top;
	unsigned long	lowmem_remap_base;
	unsigned long	global_gru_base;
	unsigned long	global_gru_shift;
	unsigned short	pnode;
	unsigned short	pnode_mask;
	unsigned short	coherency_domain_number;
	unsigned short	numa_blade_id;
	unsigned short	nr_possible_cpus;
	unsigned short	nr_online_cpus;
	short		memory_nid;
};

/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
	void		*p_uv_hub_info;
	unsigned char	blade_cpu_id;
	struct uv_scir_s scir;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))

#define uv_scir_info		(&uv_cpu_info->scir)
#define uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define uv_hub_info	_uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}

#define UV_HUB_INFO_VERSION	0x7150
extern int uv_hub_info_version(void);
static inline int uv_hub_info_check(int version)
{
	if (uv_hub_info_version() == version)
		return 0;

	pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
		uv_hub_info_version(), version);

	BUG();	/* Catastrophic - cannot continue on unknown UV system */
}
#define _uv_hub_info_check()	uv_hub_info_check(UV_HUB_INFO_VERSION)
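
/*
 * Illustrative sketch: platform setup code is expected to validate the
 * hub info layout version once, early in boot, before dereferencing any
 * uv_hub_info fields:
 *
 *	_uv_hub_info_check();
 *
 * The actual call site lives in the UV platform init code; this only shows
 * the intended use of the macro above. A mismatched version is fatal.
 */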

/*
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV1_HUB_REVISION_BASE		1
#define UV2_HUB_REVISION_BASE		3
#define UV3_HUB_REVISION_BASE		5
#define UV4_HUB_REVISION_BASE		7
#define UV4A_HUB_REVISION_BASE		8	/* UV4 (fixed) rev 2 */

#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
{
	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
}
#else
static inline int is_uv1_hub(void)
{
	return 0;
}
#endif

#ifdef UV2_HUB_IS_SUPPORTED
static inline int is_uv2_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
}
#else
static inline int is_uv2_hub(void)
{
	return 0;
}
#endif

#ifdef UV3_HUB_IS_SUPPORTED
static inline int is_uv3_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
}
#else
static inline int is_uv3_hub(void)
{
	return 0;
}
#endif

/* First test "is UV4A", then "is UV4" */
#ifdef UV4A_HUB_IS_SUPPORTED
static inline int is_uv4a_hub(void)
{
	return (uv_hub_info->hub_revision >= UV4A_HUB_REVISION_BASE);
}
#else
static inline int is_uv4a_hub(void)
{
	return 0;
}
#endif

#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
	return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
}
#else
static inline int is_uv4_hub(void)
{
	return 0;
}
#endif

static inline int is_uvx_hub(void)
{
	if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
		return uv_hub_info->hub_revision;

	return 0;
}

static inline int is_uv_hub(void)
{
#ifdef UV1_HUB_IS_SUPPORTED
	return uv_hub_info->hub_revision;
#endif
	return is_uvx_hub();
}

union uvh_apicid {
	unsigned long	v;
	struct uvh_apicid_s {
		unsigned long	local_apic_mask  : 24;
		unsigned long	local_apic_shift :  5;
		unsigned long	unused1          :  3;
		unsigned long	pnode_mask       : 24;
		unsigned long	pnode_shift      :  5;
		unsigned long	unused2          :  3;
	} s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)		((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)		(UV_PNODE_TO_GNODE(p) << 1)
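
/*
 * Worked example with made-up values: on a system where pnode_mask is 0x3f
 * and gnode_extra is 0, a C-brick with NASID 6 has GNODE 3 (6 >> 1) and
 * PNODE 3 (the GNODE masked by pnode_mask). Converting back,
 * UV_PNODE_TO_NASID(3) yields (3 | 0) << 1 == 6. The mask and gnode_extra
 * values shown here are illustrative only; the real values are set up from
 * hardware/BIOS data at boot.
 */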

#define UV1_LOCAL_MMR_BASE		0xf4000000UL
#define UV1_GLOBAL_MMR32_BASE		0xf8000000UL
#define UV1_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
#define UV1_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV4_LOCAL_MMR_BASE		0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE		(16UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE	(					\
				is_uv1_hub() ? UV1_LOCAL_MMR_BASE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_BASE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_BASE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)

#define UV_GLOBAL_MMR32_BASE	(					\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)

#define UV_LOCAL_MMR_SIZE	(					\
				is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_SIZE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_SIZE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)

#define UV_GLOBAL_MMR32_SIZE	(					\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)

#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

#define UV_APICID_HIBIT_MASK	0xffff0000

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system. This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node. With Nehalem cpus (2 sockets per node,
 * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
 * a node.
 *
 * The window is located at top of ACPI MMR space
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				LOCAL_BUS_SIZE - \
				SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
	return uv_hub_info->gpa_shift;
}
#define _uv_gpa_shift

/* Find node that has the address range that contains global address */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
	int i, num = uv_hub_info->gr_table_len;

	if (gr) {
		for (i = 0; i < num; i++, gr++) {
			if (pal < gr->limit)
				return gr;
		}
	}
	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
	BUG();
}

/* Return base address of node that contains global address */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_gam_range(pa);
	int base = gr->base;

	if (base < 0)
		return 0UL;

	return uv_hub_info->gr_table[base].limit;
}

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
	return uv_gam_range(paddr)->nasid;
}
#define _uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
	return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	unsigned int m_val = uv_hub_info->m_val;

	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;

	if (m_val) {
		paddr |= uv_hub_info->gnode_upper;
		paddr = ((paddr << uv_hub_info->m_shift)
						>> uv_hub_info->m_shift) |
			((paddr >> uv_hub_info->m_val)
						<< uv_hub_info->n_lshift);
	} else {
		paddr |= uv_soc_phys_ram_to_nasid(paddr)
						<< uv_hub_info->gpa_shift;
	}
	return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}

/* Top two bits indicate the requested address is in MMR space. */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
	unsigned int m_val = uv_hub_info->m_val;

	if (m_val)
		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}
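
/*
 * Illustrative sketch: a driver handing blade-local memory to the GRU would
 * typically convert a kernel virtual address to a GPA and, on the way back,
 * recover the socket physical address. "buf" is a hypothetical kernel-virtual
 * pointer to blade-local memory.
 *
 *	unsigned long gpa   = uv_gpa(buf);
 *	unsigned long paddr = uv_gpa_to_soc_phys_ram(gpa);
 *
 * For memory on this blade, paddr matches __pa(buf) (modulo the low memory
 * remap handled above).
 */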

/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	unsigned int n_lshift = uv_hub_info->n_lshift;

	if (n_lshift)
		return gpa >> n_lshift;

	return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	unsigned int m_shift = uv_hub_info->m_shift;

	if (m_shift)
		return (gpa << m_shift) >> m_shift;

	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}
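
/*
 * Illustrative sketch: a GPA decomposes into the pnode that owns the memory
 * and the offset within that node, matching the address formats described at
 * the top of this file. "gpa" is a hypothetical global physical address.
 *
 *	int pnode            = uv_gpa_to_pnode(gpa);
 *	unsigned long offset = uv_gpa_to_offset(gpa);
 *	void *vaddr          = uv_pnode_offset_to_vaddr(pnode, offset);
 *
 * uv_pnode_offset_to_vaddr() below performs the reverse mapping back to a
 * socket virtual address.
 */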

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	unsigned int m_val = uv_hub_info->m_val;
	unsigned long base;
	unsigned short sockid, node, *p2s;

	if (m_val)
		return __va(((unsigned long)pnode << m_val) | offset);

	p2s = uv_hub_info->pnode_to_socket;
	sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
	node = uv_socket_to_node(sockid);

	/* limit address of previous socket is our base, except node 0 is 0 */
	if (!node)
		return __va((unsigned long)offset);

	base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
	return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
	unsigned short *s2pn = uv_hub_info->socket_to_pnode;

	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/* Convert an apicid to the socket number on the blade */
static inline int uv_apicid_to_socket(int apicid)
{
	if (is_uv1_hub())
		return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
	else
		return 0;
}

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		    UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}
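
/*
 * Illustrative sketch: reading and writing a 64-bit MMR on a remote hub goes
 * through the global MMR64 accessors above. The scratch register is used here
 * only as an example of an MMR offset defined in <asm/uv/uv_mmrs.h>; "pnode"
 * is a hypothetical target node.
 *
 *	unsigned long v = uv_read_global_mmr64(pnode, UVH_SCRATCH5);
 *	uv_write_global_mmr64(pnode, UVH_SCRATCH5, v);
 *
 * Use the MMR32 variants only for registers known to be reachable in the
 * 32-bit window.
 */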

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
	return uv_cpu_info_per(cpu)->blade_cpu_id;
}
#define _uv_cpu_blade_processor_id 1	/* indicate function available */

/* Blade number to Node number (UV1..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
	return blade;
}

/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV1 thru UV4 the node and the blade are identical.
 * .. If this changes then you MUST check references to this function!
 */
static inline int uv_node_to_blade_id(int nid)
{
	return nid;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_node_to_blade_id(cpu_to_node(cpu));
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_hub_info_list(nid)->pnode;
}

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}
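
/*
 * Illustrative sketch: walking the blades of the partition with the
 * for_each_possible_blade() iterator defined earlier and the per-blade
 * helpers above. "bid" and "ncpus" are hypothetical locals.
 *
 *	int bid, ncpus = 0;
 *
 *	for_each_possible_blade(bid)
 *		ncpus += uv_blade_nr_possible_cpus(bid);
 *
 * This counts every possible cpu across all blades in the partition.
 */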

/* Per Hub NMI support */
extern void uv_nmi_setup(void);
extern void uv_nmi_setup_hubless(void);

/* BIOS/Kernel flags exchange MMR */
#define UVH_BIOS_KERNEL_MMR		UVH_SCRATCH5
#define UVH_BIOS_KERNEL_MMR_ALIAS	UVH_SCRATCH5_ALIAS
#define UVH_BIOS_KERNEL_MMR_ALIAS_2	UVH_SCRATCH5_ALIAS_2

/* TSC sync valid, set by BIOS */
#define UVH_TSC_SYNC_MMR	UVH_BIOS_KERNEL_MMR
#define UVH_TSC_SYNC_SHIFT	10
#define UVH_TSC_SYNC_SHIFT_UV2K	16	/* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK	3	/* 0011 */
#define UVH_TSC_SYNC_VALID	3	/* 0011 */
#define UVH_TSC_SYNC_INVALID	2	/* 0010 */

/* BMC sets a bit in this MMR (making it non-zero) before sending an NMI */
#define UVH_NMI_MMR		UVH_BIOS_KERNEL_MMR
#define UVH_NMI_MMR_CLEAR	UVH_BIOS_KERNEL_MMR_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"

/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX		UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR	UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT	UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
#define UVH_NMI_MMRX_TYPE	"EXTIO_INT0"

/* Non-zero indicates newer SMM NMI handler present */
#define UVH_NMI_MMRX_SUPPORTED	UVH_EXTIO_INT0_BROADCAST

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
#define UVH_NMI_MMRX_REQ	UVH_BIOS_KERNEL_MMR_ALIAS_2
#define UVH_NMI_MMRX_REQ_SHIFT	62

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
	bool		hub_present;	/* false means UV hubless system */
	bool		pch_owner;	/* indicates this hub owns PCH */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	int			state;
	int			pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

#define uv_hub_nmi		this_cpu_read(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)	(per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)	(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define UV_NMI_STATE_OUT	0
#define UV_NMI_STATE_IN		1
#define UV_NMI_STATE_DUMP	2
#define UV_NMI_STATE_DUMP_DONE	3

/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
	if (uv_scir_info->state != value) {
		uv_scir_info->state = value;
		uv_write_local_mmr8(uv_scir_info->offset, value);
	}
}

static inline unsigned long uv_scir_offset(int apicid)
{
	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
}

static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
	if (uv_cpu_scir_info(cpu)->state != value) {
		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
				uv_cpu_scir_info(cpu)->offset, value);
		uv_cpu_scir_info(cpu)->state = value;
	}
}
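
/*
 * Illustrative sketch: a heartbeat timer handler would flip the SCIR
 * heartbeat bit so the system controller can see the cpu is alive, and set
 * or clear SCIR_CPU_ACTIVITY depending on whether the cpu is idle. "bits" is
 * a hypothetical local; the real handler lives in platform code.
 *
 *	unsigned char bits = uv_scir_info->state ^ SCIR_CPU_HEARTBEAT;
 *
 *	uv_set_scir_bits(bits);
 */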

extern unsigned int uv_apicid_hibits;
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
	apicid |= uv_apicid_hibits;
	return (1UL << UVH_IPI_INT_SEND_SHFT) |
			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
			(vector << UVH_IPI_INT_VECTOR_SHFT);
}

static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
	unsigned long val;
	unsigned long dmode = dest_Fixed;

	if (vector == NMI_VECTOR)
		dmode = dest_NMI;

	val = uv_hub_ipi_value(apicid, vector, dmode);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
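
/*
 * Illustrative sketch: sending an NMI IPI to a specific cpu by first looking
 * up its pnode and apicid. "cpu" is a hypothetical target;
 * per_cpu(x86_cpu_to_apicid, cpu) is the usual source of the apicid.
 *
 *	int pnode  = uv_cpu_to_pnode(cpu);
 *	int apicid = per_cpu(x86_cpu_to_apicid, cpu);
 *
 *	uv_hub_send_ipi(pnode, apicid, NMI_VECTOR);
 */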

/*
 * Get the minimum revision number of the hub chips within the partition.
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */