/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
	/* Each page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page, so a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
	struct regpair next_phys;
	void *next_virt;
};

struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};

struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};

struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};

struct addr_tbl_entry {
	void *virt_addr;
	dma_addr_t dma_map;
};

struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */

	struct {
		/* Table of the virtual and DMA addresses of the chain pages,
		 * ordered to match the physical addresses kept in the PBL
		 * table.
		 */
		struct addr_tbl_entry *pp_addr_tbl;

		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Capacity counts only usable elements */
	u32 capacity;
	u32 page_cnt;

	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	u8 elem_unusable;

	enum qed_chain_cnt_type cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	u32 page_size;

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		__le64 *table_virt;
		dma_addr_t table_phys;
		size_t table_size;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32 size;

	enum qed_chain_use_mode intended_use;

	bool b_external_pbl;
};

struct qed_chain_init_params {
	enum qed_chain_mode mode;
	enum qed_chain_use_mode intended_use;
	enum qed_chain_cnt_type cnt_type;

	u32 page_size;
	u32 num_elems;
	size_t elem_size;

	void *ext_pbl_virt;
	dma_addr_t ext_pbl_phys;
};
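
/* Illustrative sketch (not part of this header's API): a driver typically
 * fills these parameters and hands them to the chain allocator implemented
 * elsewhere in the qed core driver (qed_chain_alloc() in the in-tree driver).
 * The field values and the element type below are hypothetical:
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems	= 1024,
 *		.elem_size	= sizeof(struct my_hw_desc),
 *		.page_size	= QED_CHAIN_PAGE_SIZE,
 *	};
 */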

#define QED_CHAIN_PAGE_SIZE	SZ_4K

#define ELEMS_PER_PAGE(elem_size, page_size) \
	((page_size) / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
	 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
	DIV_ROUND_UP((elem_cnt), \
		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
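
/* Worked example (illustrative, assuming a 64-bit build where
 * sizeof(struct qed_chain_next) == 16): with 8-byte elements on a 4K page in
 * QED_CHAIN_MODE_NEXT_PTR mode,
 *
 *	ELEMS_PER_PAGE(8, 4096)				= 512
 *	UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR)		= 1 + (16 - 1) / 8 = 2
 *	USABLE_ELEMS_PER_PAGE(8, 4096, NEXT_PTR)	= 510
 *	QED_CHAIN_PAGE_CNT(1024, 8, 4096, NEXT_PTR)	= DIV_ROUND_UP(1024, 510) = 3
 */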

#define is_chain_u16(p) \
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) \
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */

static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
{
	return chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
	return chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
	return chain->u.chain32.prod_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
	return chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
{
	u32 prod = qed_chain_get_prod_idx(chain);
	u32 cons = qed_chain_get_cons_idx(chain);
	u16 elem_per_page = chain->elem_per_page;
	u16 used;

	if (prod < cons)
		prod += (u32)U16_MAX + 1;

	used = (u16)(prod - cons);
	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (u16)(prod / elem_per_page - cons / elem_per_page);

	return used;
}

static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
{
	return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
}

static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
{
	u64 prod = qed_chain_get_prod_idx_u32(chain);
	u64 cons = qed_chain_get_cons_idx_u32(chain);
	u16 elem_per_page = chain->elem_per_page;
	u32 used;

	if (prod < cons)
		prod += (u64)U32_MAX + 1;

	used = (u32)(prod - cons);
	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (u32)(prod / elem_per_page - cons / elem_per_page);

	return used;
}

static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
{
	return chain->capacity - qed_chain_get_elem_used_u32(chain);
}

static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
	return chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
	return chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
	return chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
{
	return chain->pbl_sp.table_phys;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
	}
}

#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx) \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						\
	do {								\
		if (is_chain_u16(p)) {					\
			if (is_unusable_idx(p, idx))			\
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						\
			if (is_unusable_idx_u32(p, idx))		\
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							\
	} while (0)

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
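
/* Illustrative sketch of the producer-side flow (the chain and element names
 * are hypothetical, not part of this API): the caller first checks that the
 * chain has room, then fills the element returned by qed_chain_produce() and
 * finally publishes the new producer index to the device:
 *
 *	if (qed_chain_get_elem_left(&chain) < 1)
 *		return -EBUSY;
 *
 *	bd = qed_chain_produce(&chain);
 *	memset(bd, 0, sizeof(*bd));
 *	... fill the BD and ring the doorbell with qed_chain_get_prod_idx() ...
 */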

/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u32, the chain's capacity (number of usable elements)
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * increments the producer index so it can be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
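
/* Illustrative sketch of a consumer-side flow (hypothetical names, not part of
 * this API): the driver typically learns how far the FW/HW has advanced from a
 * status-block index, consumes elements up to that point and recycles them so
 * the producer index can be republished:
 *
 *	while (qed_chain_get_cons_idx(&chain) != hw_cons_idx) {
 *		cqe = qed_chain_consume(&chain);
 *		handle_cqe(cqe);
 *		qed_chain_recycle_consumed(&chain);
 *	}
 */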

/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}

/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		u32 cur_prod, page_mask, page_cnt, page_diff;

		cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
			   p_chain->u.chain32.prod_idx;

		/* Assume that number of elements in a page is power of 2 */
		page_mask = ~p_chain->elem_per_page_mask;

		/* Use "cur_prod - 1" and "prod_idx - 1" since producer index
		 * reaches the first element of next page before the page index
		 * is incremented. See qed_chain_produce().
		 * Index wrap around is not a problem because the difference
		 * between current and given producer indices is always
		 * positive and lower than the chain's capacity.
		 */
		page_diff = (((cur_prod - 1) & page_mask) -
			     ((prod_idx - 1) & page_mask)) /
			    p_chain->elem_per_page;

		page_cnt = qed_chain_get_page_cnt(p_chain);
		if (is_chain_u16(p_chain))
			p_chain->pbl.c.u16.prod_page_idx =
				(p_chain->pbl.c.u16.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
		else
			p_chain->pbl.c.u32.prod_page_idx =
				(p_chain->pbl.c.u32.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
	}

	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16)prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}
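
/* Purely arithmetic illustration of the page_diff computation above (values
 * are hypothetical): with elem_per_page = 256 (elem_per_page_mask = 0xff,
 * page_mask = ~0xff), cur_prod = 512 and a requested prod_idx = 256:
 *
 *	page_diff = (((512 - 1) & ~0xff) - ((256 - 1) & ~0xff)) / 256
 *		  = (0x100 - 0x0) / 256 = 1
 *
 * so the producer page index is rewound by one page (modulo page_cnt).
 */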

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != QED_CHAIN_MODE_PBL)
		return;

	page_cnt = qed_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
		       p_chain->page_size);
}

#endif