/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
};

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
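
/* Illustrative sketch (not part of the upstream header): how a completion
 * queue doorbell value could be composed from the bitfields above. The helper
 * name is hypothetical; the actual doorbell write is done by the queue code
 * through the mapped doorbell page.
 */
static inline u64 mana_gd_example_cq_doorbell_value(u32 cq_id, u32 tail_ptr,
						    bool arm)
{
	union gdma_doorbell_entry e = {};

	e.cq.id = cq_id;
	e.cq.tail_ptr = tail_ptr;
	e.cq.arm = arm;

	return e.as_uint64;
}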

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */
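
/* Illustrative sketch (not part of the upstream header): filling one SGE for
 * a buffer that has already been DMA-mapped. The helper name is hypothetical;
 * 'gpa_mkey' is the memory key returned by GDMA_REGISTER_DEVICE (see
 * struct gdma_dev below).
 */
static inline void mana_gd_example_fill_sge(struct gdma_sge *sge,
					    dma_addr_t dma_addr, u32 len,
					    u32 gpa_mkey)
{
	sge->address = dma_addr;
	sge->mem_key = gpa_mkey;
	sge->size = len;
}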

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 gdma_region;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
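
/* Illustrative sketch (not part of the upstream header): per the comment
 * above, an SQ/RQ producer advances 'head' in 32-byte Basic Units rather than
 * in bytes. The helper name is hypothetical; the real bookkeeping is done by
 * mana_gd_post_work_request().
 */
static inline void mana_gd_example_advance_wq_head(struct gdma_queue *wq,
						   u32 wqe_size_in_bytes)
{
	/* WQE sizes are always multiples of GDMA_WQE_BU_SIZE. */
	wq->head += wqe_size_in_bytes / GDMA_WQE_BU_SIZE;
}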

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	u32 db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
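
/* Illustrative sketch (not part of the upstream header): a WQE consists of
 * the fixed header above, the inline client OOB area and the SGL, rounded up
 * to the 32-byte Basic Unit; the result must not exceed MAX_TX_WQE_SIZE or
 * MAX_RX_WQE_SIZE. The helper name is hypothetical.
 */
static inline u32 mana_gd_example_wqe_size(u32 client_oob_size, u32 num_sge)
{
	u32 size = sizeof(struct gdma_wqe) + client_oob_size +
		   num_sge * sizeof(struct gdma_sge);

	return ALIGN(size, GDMA_WQE_BU_SIZE);
}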

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
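
/* Illustrative sketch (not part of the upstream header): with the owner-bits
 * scheme, the EQE at 'head' has been written by HW when its owner bits match
 * the value derived from how many times 'head' has wrapped around the queue.
 * 'num_eqe' is assumed to be the number of entries in the EQ; the helper name
 * is hypothetical.
 */
static inline bool mana_gd_example_eqe_is_new(const struct gdma_eqe *eqe,
					      u32 head, u32 num_eqe)
{
	u32 expected_owner = (head / num_eqe) & GDMA_EQE_OWNER_MASK;
	union gdma_eqe_info info;

	info.as_uint32 = eqe->eqe_info;

	return (info.owner_bits & GDMA_EQE_OWNER_MASK) == expected_owner;
}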

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */
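
/* Illustrative sketch (not part of the upstream header): how the protocol
 * range and capability flags above are typically placed into the request;
 * the actual exchange is performed by mana_gd_verify_vf_version(). The helper
 * name is hypothetical.
 */
static inline void
mana_gd_example_fill_verify_ver_req(struct gdma_verify_ver_req *req)
{
	req->protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req->protocol_ver_max = GDMA_PROTOCOL_LAST;

	req->gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
	req->gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
	req->gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
	req->gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
}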

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
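
/* Illustrative sketch (not part of the upstream header): the request ends in
 * a flexible array, so its on-the-wire size depends on how many page
 * addresses are carried inline; any remainder is sent later via
 * GDMA_DMA_REGION_ADD_PAGES. The helper name is hypothetical.
 */
static inline u32 mana_gd_example_create_dma_region_msg_size(u32 inline_pages)
{
	return sizeof(struct gdma_create_dma_region_req) +
	       inline_pages * sizeof(u64);
}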

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_region;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
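
/* Illustrative sketch (not part of the upstream header): the usual
 * request/response pattern over the HWC, using GDMA_QUERY_MAX_RESOURCES as an
 * arbitrary example. The helper name is hypothetical; the real callers live
 * in gdma_main.c.
 */
static inline int mana_gd_example_query_max_resources(struct gdma_context *gc)
{
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	return 0;
}
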
#endif /* _GDMA_H */