Lines Matching refs:pkt

254 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,  in qib_user_sdma_init_frag()  argument
261 pkt->addr[i].offset = offset; in qib_user_sdma_init_frag()
262 pkt->addr[i].length = len; in qib_user_sdma_init_frag()
263 pkt->addr[i].first_desc = first_desc; in qib_user_sdma_init_frag()
264 pkt->addr[i].last_desc = last_desc; in qib_user_sdma_init_frag()
265 pkt->addr[i].put_page = put_page; in qib_user_sdma_init_frag()
266 pkt->addr[i].dma_mapped = dma_mapped; in qib_user_sdma_init_frag()
267 pkt->addr[i].page = page; in qib_user_sdma_init_frag()
268 pkt->addr[i].kvaddr = kvaddr; in qib_user_sdma_init_frag()
269 pkt->addr[i].addr = dma_addr; in qib_user_sdma_init_frag()
270 pkt->addr[i].dma_length = dma_length; in qib_user_sdma_init_frag()
297 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_page_to_frags() argument
339 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
340 newlen = pkt->tidsm[pkt->tidsmidx].length; in qib_user_sdma_page_to_frags()
352 if ((pkt->payload_size + newlen) >= pkt->frag_size) { in qib_user_sdma_page_to_frags()
353 newlen = pkt->frag_size - pkt->payload_size; in qib_user_sdma_page_to_frags()
355 } else if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
356 if (newlen == pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
359 if (newlen == pkt->bytes_togo) in qib_user_sdma_page_to_frags()
364 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
370 pkt->bytes_togo -= newlen; in qib_user_sdma_page_to_frags()
371 pkt->payload_size += newlen; in qib_user_sdma_page_to_frags()
372 pkt->naddr++; in qib_user_sdma_page_to_frags()
373 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
379 if (pkt->bytes_togo == 0) { in qib_user_sdma_page_to_frags()
382 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
383 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
385 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
386 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
389 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
393 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
400 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
401 pkt->tidsm[pkt->tidsmidx].length -= newlen; in qib_user_sdma_page_to_frags()
402 if (pkt->tidsm[pkt->tidsmidx].length) { in qib_user_sdma_page_to_frags()
403 pkt->tidsm[pkt->tidsmidx].offset += newlen; in qib_user_sdma_page_to_frags()
405 pkt->tidsmidx++; in qib_user_sdma_page_to_frags()
406 if (pkt->tidsmidx == pkt->tidsmcount) { in qib_user_sdma_page_to_frags()
432 pbclen = pkt->addr[pkt->index].length; in qib_user_sdma_page_to_frags()
439 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr; in qib_user_sdma_page_to_frags()
446 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2)); in qib_user_sdma_page_to_frags()
451 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
473 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
474 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
476 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
477 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
480 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
484 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
492 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2)); in qib_user_sdma_page_to_frags()
497 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
501 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) + in qib_user_sdma_page_to_frags()
502 (pkt->tidsm[pkt->tidsmidx].offset>>2)); in qib_user_sdma_page_to_frags()
505 hdr->uwords[2] += pkt->payload_size; in qib_user_sdma_page_to_frags()
517 if (pkt->tiddma) in qib_user_sdma_page_to_frags()
520 seqnum.pkt++; in qib_user_sdma_page_to_frags()
524 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
530 pkt->index = pkt->naddr; in qib_user_sdma_page_to_frags()
531 pkt->payload_size = 0; in qib_user_sdma_page_to_frags()
532 pkt->naddr++; in qib_user_sdma_page_to_frags()
533 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
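Two of the reference lines above (source lines 446 and 492) shrink the PBC length word by bytes_togo >> 2 and payload_size >> 2 respectively: the subtraction is done on (bytes >> 2), i.e. in units of 32-bit words. A standalone, userspace illustration of that arithmetic, with made-up values, is:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical values for illustration only. */
            uint16_t pbc_dwords = 1024; /* PBC length word: payload size in 32-bit words */
            uint32_t bytes_togo = 256;  /* payload bytes deferred to a later fragment */

            /* Same shape as le16_to_cpu(pbc16[0]) - (pkt->bytes_togo >> 2):
             * 256 bytes is 64 dwords, so the length word drops from 1024 to 960. */
            pbc_dwords -= bytes_togo >> 2;

            printf("%u\n", (unsigned)pbc_dwords); /* prints 960 */
            return 0;
    }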
559 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_coalesce() argument
591 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
617 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_free_pkt_frag() argument
622 if (pkt->addr[i].page) { in qib_user_sdma_free_pkt_frag()
624 if (pkt->addr[i].dma_mapped) in qib_user_sdma_free_pkt_frag()
626 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
627 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
630 if (pkt->addr[i].kvaddr) in qib_user_sdma_free_pkt_frag()
631 kunmap(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
633 if (pkt->addr[i].put_page) in qib_user_sdma_free_pkt_frag()
634 put_user_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
636 __free_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
637 } else if (pkt->addr[i].kvaddr) { in qib_user_sdma_free_pkt_frag()
639 if (pkt->addr[i].dma_mapped) { in qib_user_sdma_free_pkt_frag()
642 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
643 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
645 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
646 } else if (pkt->addr[i].addr) { in qib_user_sdma_free_pkt_frag()
649 pkt->addr[i].kvaddr, pkt->addr[i].addr); in qib_user_sdma_free_pkt_frag()
652 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
660 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pages() argument
687 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
717 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pkt() argument
728 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
738 for (idx = 1; idx < pkt->naddr; idx++) in qib_user_sdma_pin_pkt()
739 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
744 if (pkt->addr[0].dma_mapped) { in qib_user_sdma_pin_pkt()
746 pkt->addr[0].addr, in qib_user_sdma_pin_pkt()
747 pkt->addr[0].dma_length, in qib_user_sdma_pin_pkt()
749 pkt->addr[0].addr = 0; in qib_user_sdma_pin_pkt()
750 pkt->addr[0].dma_mapped = 0; in qib_user_sdma_pin_pkt()
759 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_init_payload() argument
765 if (pkt->frag_size == pkt->bytes_togo && in qib_user_sdma_init_payload()
766 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
767 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
769 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
779 struct qib_user_sdma_pkt *pkt, *pkt_next; in qib_user_sdma_free_pkt_list() local
781 list_for_each_entry_safe(pkt, pkt_next, list, list) { in qib_user_sdma_free_pkt_list()
784 for (i = 0; i < pkt->naddr; i++) in qib_user_sdma_free_pkt_list()
785 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
787 if (pkt->largepkt) in qib_user_sdma_free_pkt_list()
788 kfree(pkt); in qib_user_sdma_free_pkt_list()
790 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
815 struct qib_user_sdma_pkt *pkt = NULL; in qib_user_sdma_queue_pkts() local
911 pktsize = struct_size(pkt, addr, n); in qib_user_sdma_queue_pkts()
926 pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL); in qib_user_sdma_queue_pkts()
927 if (!pkt) { in qib_user_sdma_queue_pkts()
931 pkt->largepkt = 1; in qib_user_sdma_queue_pkts()
932 pkt->frag_size = frag_size; in qib_user_sdma_queue_pkts()
933 pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
936 char *tidsm = (char *)pkt + pktsize; in qib_user_sdma_queue_pkts()
944 pkt->tidsm = in qib_user_sdma_queue_pkts()
946 pkt->tidsmcount = tidsmsize/ in qib_user_sdma_queue_pkts()
948 pkt->tidsmidx = 0; in qib_user_sdma_queue_pkts()
959 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
960 if (!pkt) { in qib_user_sdma_queue_pkts()
964 pkt->largepkt = 0; in qib_user_sdma_queue_pkts()
965 pkt->frag_size = bytes_togo; in qib_user_sdma_queue_pkts()
966 pkt->addrlimit = ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
968 pkt->bytes_togo = bytes_togo; in qib_user_sdma_queue_pkts()
969 pkt->payload_size = 0; in qib_user_sdma_queue_pkts()
970 pkt->counter = counter; in qib_user_sdma_queue_pkts()
971 pkt->tiddma = tiddma; in qib_user_sdma_queue_pkts()
974 qib_user_sdma_init_frag(pkt, 0, /* index */ in qib_user_sdma_queue_pkts()
980 pkt->index = 0; in qib_user_sdma_queue_pkts()
981 pkt->naddr = 1; in qib_user_sdma_queue_pkts()
984 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
992 pkt->addr[0].last_desc = 1; in qib_user_sdma_queue_pkts()
1006 pkt->addr[0].addr = dma_addr; in qib_user_sdma_queue_pkts()
1007 pkt->addr[0].dma_mapped = 1; in qib_user_sdma_queue_pkts()
1013 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1014 pkt->index = 0; /* reset index for push on hw */ in qib_user_sdma_queue_pkts()
1015 *ndesc += pkt->naddr; in qib_user_sdma_queue_pkts()
1017 list_add_tail(&pkt->list, list); in qib_user_sdma_queue_pkts()
1025 if (pkt->largepkt) in qib_user_sdma_queue_pkts()
1026 kfree(pkt); in qib_user_sdma_queue_pkts()
1028 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
1052 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_clean() local
1068 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1069 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
1074 list_move_tail(&pkt->list, &free_list); in qib_user_sdma_queue_clean()
1085 pkt = list_entry(free_list.prev, in qib_user_sdma_queue_clean()
1087 counter = pkt->counter; in qib_user_sdma_queue_clean()
1148 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_drain() local
1158 list_for_each_entry_safe(pkt, pkt_prev, in qib_user_sdma_queue_drain()
1160 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1161 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1209 struct qib_user_sdma_pkt *pkt, int idx, in qib_user_sdma_send_frag() argument
1212 const u64 addr = (u64) pkt->addr[idx].addr + in qib_user_sdma_send_frag()
1213 (u64) pkt->addr[idx].offset; in qib_user_sdma_send_frag()
1214 const u64 dwlen = (u64) pkt->addr[idx].length / 4; in qib_user_sdma_send_frag()
1221 if (pkt->addr[idx].first_desc) in qib_user_sdma_send_frag()
1223 if (pkt->addr[idx].last_desc) { in qib_user_sdma_send_frag()
1252 struct qib_user_sdma_pkt *pkt = in qib_user_sdma_send_desc() local
1259 for (i = pkt->index; i < pkt->naddr && nfree; i++) { in qib_user_sdma_send_desc()
1260 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen); in qib_user_sdma_send_desc()
1261 ofs += pkt->addr[i].length >> 2; in qib_user_sdma_send_desc()
1271 if (pkt->addr[i].last_desc == 0) in qib_user_sdma_send_desc()
1281 for (j = pkt->index; j <= i; j++) { in qib_user_sdma_send_desc()
1288 c += i + 1 - pkt->index; in qib_user_sdma_send_desc()
1289 pkt->index = i + 1; /* index for next first */ in qib_user_sdma_send_desc()
1297 if (pkt->index == pkt->naddr) { in qib_user_sdma_send_desc()
1298 pkt->added = ppd->sdma_descq_added; in qib_user_sdma_send_desc()
1299 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1300 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1301 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1302 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1303 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1304 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
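Taken together, the references above touch every field of the packet descriptor. A rough reconstruction of struct qib_user_sdma_pkt, useful when reading the lines above, is sketched below; the field names all appear in the reference lines, but the types, the comments, the tidsm element type, and the size of the addr[] array are assumptions rather than the verbatim kernel definition.

    /* Sketch only (kernel context): field names from the reference lines above;
     * types, comments and the addr[] bound are assumptions. */
    struct qib_user_sdma_pkt {
            struct list_head list;          /* queue linkage (pq->sent etc.) */

            u8  tiddma;                     /* TID-SDMA (expected receive) packet */
            u8  largepkt;                   /* kmalloc'ed rather than slab-allocated */
            u16 frag_size;                  /* max payload bytes per fragment */
            u16 index;                      /* next frag to push / current header index */
            u16 naddr;                      /* fragments filled in so far */
            u16 addrlimit;                  /* capacity of addr[] */
            u16 tidsmidx;                   /* current TID session-member index */
            u16 tidsmcount;                 /* number of TID session members */
            u16 payload_size;               /* payload bytes under the current header */
            u32 bytes_togo;                 /* payload bytes still to be described */
            u32 counter;                    /* completion counter value */
            struct qib_tid_session_member *tidsm; /* TID session-member array (type name assumed) */
            struct qib_user_sdma_queue *pq; /* owning queue (type name assumed) */
            u64 added;                      /* descq count when the last descriptor was added */

            struct {
                    u16 offset;
                    u16 length;
                    u16 first_desc;
                    u16 last_desc;
                    u16 put_page;
                    u16 dma_mapped;
                    struct page *page;
                    void *kvaddr;
                    dma_addr_t addr;
                    u16 dma_length;
            } addr[4];                      /* inline capacity assumed; large packets extend it */
    };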