Lines matching refs: ps (drivers/infiniband/hw/hfi1/ruc.c)

603 struct hfi1_pkt_state *ps) in hfi1_make_ruc_header_16B() argument
606 struct hfi1_ibport *ibp = ps->ibp; in hfi1_make_ruc_header_16B()
613 (ps->s_txreq->hdr_dwords << 2), in hfi1_make_ruc_header_16B()
614 ps->s_txreq->s_cur_size); in hfi1_make_ruc_header_16B()
615 u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size + in hfi1_make_ruc_header_16B()
630 grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; in hfi1_make_ruc_header_16B()
632 ps->s_txreq->hdr_dwords += in hfi1_make_ruc_header_16B()
634 ps->s_txreq->hdr_dwords - LRH_16B_DWORDS, in hfi1_make_ruc_header_16B()
666 hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah, in hfi1_make_ruc_header_16B()
670 (ps->s_txreq->hdr_dwords + nwords) >> 1, in hfi1_make_ruc_header_16B()
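
hfi1_make_ruc_header_16B() sizes the packet for the OPA 16B format: the padding helper rounds header plus payload out to an 8-byte flit boundary, nwords counts the payload dwords plus the ICRC, and the 16B LRH length field (line 670) is written in 8-byte quad-words, hence the >> 1. Below is a minimal user-space sketch of that arithmetic; it assumes SIZE_OF_CRC is 1 dword (the ICRC) and SIZE_OF_LT is 1 byte (the 16B tail byte), with get_16b_padding() mirroring what the driver's hfi1_get_16b_padding() is expected to compute:

    #include <stdint.h>
    #include <stdio.h>

    #define SIZE_OF_CRC 1   /* ICRC, counted in 32-bit dwords */
    #define SIZE_OF_LT  1   /* 16B tail (LT) byte, counted in bytes */

    /* Round header + payload + ICRC + LT up to an 8-byte flit boundary. */
    static uint8_t get_16b_padding(uint32_t hdr_size, uint32_t payload)
    {
        return -(hdr_size + payload + (SIZE_OF_CRC << 2) + SIZE_OF_LT) & 0x7;
    }

    int main(void)
    {
        uint32_t hdr_dwords = 10;   /* hypothetical 16B LRH + BTH size */
        uint32_t s_cur_size = 33;   /* hypothetical payload bytes */

        uint8_t extra = get_16b_padding(hdr_dwords << 2, s_cur_size);
        uint32_t nwords = SIZE_OF_CRC + ((s_cur_size + extra + SIZE_OF_LT) >> 2);

        /* The 16B LRH length field counts 8-byte quad-words, hence >> 1. */
        printf("pad=%u nwords=%u len_qw=%u\n", (unsigned)extra,
               (unsigned)nwords, (unsigned)((hdr_dwords + nwords) >> 1));
        return 0;
    }

With hdr_dwords = 10 and a 33-byte payload this prints pad=2 nwords=10 len_qw=10, i.e. an 80-byte packet: 40 header bytes, 36 bytes of payload plus pad plus tail byte, and a 4-byte ICRC.
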
691 struct hfi1_pkt_state *ps) in hfi1_make_ruc_header_9B() argument
694 struct hfi1_ibport *ibp = ps->ibp; in hfi1_make_ruc_header_9B()
698 u8 extra_bytes = -ps->s_txreq->s_cur_size & 3; in hfi1_make_ruc_header_9B()
699 u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size + in hfi1_make_ruc_header_9B()
703 struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; in hfi1_make_ruc_header_9B()
706 ps->s_txreq->hdr_dwords += in hfi1_make_ruc_header_9B()
709 ps->s_txreq->hdr_dwords - LRH_9B_DWORDS, in hfi1_make_ruc_header_9B()
735 hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, in hfi1_make_ruc_header_9B()
737 ps->s_txreq->hdr_dwords + nwords, in hfi1_make_ruc_header_9B()
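
The 9B path does the same sizing for classic IB framing. At line 698, -s_cur_size & 3 is the two's-complement idiom for the number of bytes needed to reach the next 4-byte boundary, and the 9B LRH length handed to hfi1_make_ib_hdr() at line 737 is hdr_dwords + nwords directly, in dwords, with no quad-word shift. A small standalone demonstration of the padding identity (SIZE_OF_CRC = 1 dword assumed, as above):

    #include <stdint.h>
    #include <stdio.h>

    #define SIZE_OF_CRC 1   /* ICRC, counted in 32-bit dwords */

    int main(void)
    {
        for (uint32_t size = 0; size < 8; size++) {
            /* -size & 3 == bytes needed to reach the next 4-byte boundary */
            uint32_t extra = -size & 3;
            uint32_t nwords = SIZE_OF_CRC + ((size + extra) >> 2);

            printf("size=%u pad=%u nwords=%u\n", size, extra, nwords);
        }
        return 0;
    }
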
746 struct hfi1_pkt_state *ps);
756 struct hfi1_pkt_state *ps) in hfi1_make_ruc_header() argument
776 hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps); in hfi1_make_ruc_header()
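
hfi1_make_ruc_header() itself is only a dispatcher: line 776 indexes hfi1_ruc_header_tbl by the QP's header type, so the 9B-versus-16B decision costs one table lookup per packet. A stripped-down sketch of that function-pointer-table pattern follows; the signatures are simplified stand-ins, since the real entries take the qp/ohdr/bth0/bth2/middle/ps arguments shown above:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for HFI1_PKT_TYPE_9B / HFI1_PKT_TYPE_16B. */
    enum pkt_type { PKT_TYPE_9B, PKT_TYPE_16B };

    typedef void (*make_hdr_fn)(uint32_t bth0, uint32_t bth2);

    static void make_hdr_9b(uint32_t bth0, uint32_t bth2)
    {
        printf("9B header: bth0=%#x bth2=%#x\n", (unsigned)bth0, (unsigned)bth2);
    }

    static void make_hdr_16b(uint32_t bth0, uint32_t bth2)
    {
        printf("16B header: bth0=%#x bth2=%#x\n", (unsigned)bth0, (unsigned)bth2);
    }

    /* Indexed once per packet, like hfi1_ruc_header_tbl[priv->hdr_type]. */
    static const make_hdr_fn hdr_tbl[] = {
        [PKT_TYPE_9B]  = make_hdr_9b,
        [PKT_TYPE_16B] = make_hdr_16b,
    };

    int main(void)
    {
        enum pkt_type hdr_type = PKT_TYPE_16B;   /* normally priv->hdr_type */

        hdr_tbl[hdr_type](0x12345678u, 42u);
        return 0;
    }

Adding another packet format then means adding a table entry rather than touching every caller.
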
796 struct hfi1_pkt_state *ps) in schedule_send_yield() argument
798 ps->pkts_sent = true; in schedule_send_yield()
800 if (unlikely(time_after(jiffies, ps->timeout))) { in schedule_send_yield()
801 if (!ps->in_thread || in schedule_send_yield()
802 workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) { in schedule_send_yield()
803 spin_lock_irqsave(&qp->s_lock, ps->flags); in schedule_send_yield()
806 spin_unlock_irqrestore(&qp->s_lock, ps->flags); in schedule_send_yield()
807 this_cpu_inc(*ps->ppd->dd->send_schedule); in schedule_send_yield()
813 this_cpu_inc(*ps->ppd->dd->send_schedule); in schedule_send_yield()
814 ps->timeout = jiffies + ps->timeout_int; in schedule_send_yield()
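
schedule_send_yield() bounds how long the send engine may monopolize a CPU. Every call marks ps->pkts_sent, and once jiffies runs past ps->timeout the QP is either rescheduled onto its workqueue (when the caller is not a workqueue thread, or that workqueue is congested) or the deadline is simply pushed out by ps->timeout_int and the loop continues. Here is a user-space analogue of that pattern, with CLOCK_MONOTONIC standing in for jiffies and the requeue reduced to a true return value; all names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Per-send state, loosely modeled on struct hfi1_pkt_state. */
    struct pkt_state {
        struct timespec timeout;   /* absolute deadline, like ps->timeout */
        long timeout_int_ns;       /* slice budget (< 1 s assumed here)   */
        bool in_thread;            /* running from a workqueue thread?    */
        bool pkts_sent;
    };

    static bool deadline_passed(const struct timespec *t)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > t->tv_sec ||
               (now.tv_sec == t->tv_sec && now.tv_nsec > t->tv_nsec);
    }

    static void reset_deadline(struct pkt_state *ps)
    {
        clock_gettime(CLOCK_MONOTONIC, &ps->timeout);
        ps->timeout.tv_nsec += ps->timeout_int_ns;
        if (ps->timeout.tv_nsec >= 1000000000L) {
            ps->timeout.tv_sec += 1;
            ps->timeout.tv_nsec -= 1000000000L;
        }
    }

    /* Returns true when the caller must stop: the driver would requeue
     * the QP on its workqueue here.  Otherwise the deadline is pushed
     * out (the light, cond_resched-style yield) and sending continues. */
    static bool send_yield(struct pkt_state *ps, bool wq_congested)
    {
        ps->pkts_sent = true;
        if (!deadline_passed(&ps->timeout))
            return false;
        if (!ps->in_thread || wq_congested)
            return true;
        reset_deadline(ps);
        return false;
    }

    int main(void)
    {
        struct pkt_state ps = { .timeout_int_ns = 1000000L, .in_thread = false };

        reset_deadline(&ps);
        while (!send_yield(&ps, false))
            ;   /* spin through one 1 ms slice; a non-workqueue caller
                 * must then requeue and return */
        return 0;
    }
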
845 struct hfi1_pkt_state ps; in hfi1_do_send() local
847 int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps); in hfi1_do_send()
849 ps.dev = to_idev(qp->ibqp.device); in hfi1_do_send()
850 ps.ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_do_send()
851 ps.ppd = ppd_from_ibp(ps.ibp); in hfi1_do_send()
852 ps.in_thread = in_thread; in hfi1_do_send()
859 ~((1 << ps.ppd->lmc) - 1)) == in hfi1_do_send()
860 ps.ppd->lid)) { in hfi1_do_send()
865 ps.timeout_int = qp->timeout_jiffies; in hfi1_do_send()
869 ~((1 << ps.ppd->lmc) - 1)) == in hfi1_do_send()
870 ps.ppd->lid)) { in hfi1_do_send()
875 ps.timeout_int = SEND_RESCHED_TIMEOUT; in hfi1_do_send()
879 ps.timeout_int = SEND_RESCHED_TIMEOUT; in hfi1_do_send()
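
The repeated test at lines 859-870, (lid & ~((1 << lmc) - 1)) == ppd->lid, decides whether the destination LID belongs to the local port: with the LMC low-order bits used as path bits, a port answers to 2^lmc consecutive LIDs starting at its base LID, so masking those bits off the destination recovers the base, and such traffic is looped back locally instead of sent on the wire. A compact illustration with hypothetical LID values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* With LMC low bits acting as path bits, a port owns the LID range
     * [base_lid, base_lid + 2^lmc).  Masking those bits off a destination
     * LID and comparing against the base detects loopback. */
    static bool lid_is_local(uint32_t dlid, uint32_t base_lid, uint8_t lmc)
    {
        return (dlid & ~((1u << lmc) - 1)) == base_lid;
    }

    int main(void)
    {
        uint32_t base = 0x40;   /* hypothetical port base LID */
        uint8_t lmc = 2;        /* port owns LIDs 0x40..0x43  */

        for (uint32_t dlid = 0x3e; dlid <= 0x45; dlid++)
            printf("dlid=%#x local=%d\n", (unsigned)dlid,
                   lid_is_local(dlid, base, lmc));
        return 0;
    }
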
882 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_send()
886 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_send()
892 ps.timeout_int = ps.timeout_int / 8; in hfi1_do_send()
893 ps.timeout = jiffies + ps.timeout_int; in hfi1_do_send()
894 ps.cpu = priv->s_sde ? priv->s_sde->cpu : in hfi1_do_send()
895 cpumask_first(cpumask_of_node(ps.ppd->dd->node)); in hfi1_do_send()
896 ps.pkts_sent = false; in hfi1_do_send()
899 ps.s_txreq = get_waiting_verbs_txreq(qp); in hfi1_do_send()
902 if (ps.s_txreq) { in hfi1_do_send()
903 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_send()
908 if (hfi1_verbs_send(qp, &ps)) in hfi1_do_send()
911 if (schedule_send_yield(qp, &ps)) in hfi1_do_send()
914 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_send()
916 } while (make_req(qp, &ps)); in hfi1_do_send()
917 iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); in hfi1_do_send()
918 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_send()
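
Taken together, lines 882-918 are the classic build-under-lock, send-unlocked loop: make_req() constructs the next packet while holding qp->s_lock, the lock is dropped across hfi1_verbs_send(), and the engine returns early when the send must wait or schedule_send_yield() has requeued the QP. The skeleton below keeps that shape with a pthread spinlock and a toy make_req(); every name in it is an illustrative stand-in, not the driver's API:

    #include <pthread.h>
    #include <stdio.h>

    /* Toy QP: 'remaining' stands in for the work the real make_req()
     * would turn into packets. */
    struct qp {
        pthread_spinlock_t s_lock;
        int remaining;
    };

    /* Build the next packet under the lock; 0 means nothing to send. */
    static int make_req(struct qp *qp)
    {
        return qp->remaining > 0 ? qp->remaining-- : 0;
    }

    /* Push one packet with the lock dropped; nonzero would mean "the
     * send must wait, the engine will be woken up later". */
    static int verbs_send(struct qp *qp, int txreq)
    {
        (void)qp;
        printf("sent packet %d\n", txreq);
        return 0;
    }

    static void do_send(struct qp *qp)
    {
        int txreq = 0;   /* no pre-built packet waiting in this sketch */

        pthread_spin_lock(&qp->s_lock);
        do {
            if (txreq) {
                /* Drop the lock across the (potentially slow) send. */
                pthread_spin_unlock(&qp->s_lock);
                if (verbs_send(qp, txreq))
                    return;   /* the driver also returns after a yield */
                pthread_spin_lock(&qp->s_lock);
            }
        } while ((txreq = make_req(qp)) != 0);
        pthread_spin_unlock(&qp->s_lock);
    }

    int main(void)
    {
        struct qp qp = { .remaining = 3 };

        pthread_spin_init(&qp.s_lock, PTHREAD_PROCESS_PRIVATE);
        do_send(&qp);
        pthread_spin_destroy(&qp.s_lock);
        return 0;
    }

Dropping the lock across the hardware hand-off is what lets completions and new work queue up on other CPUs while a packet is in flight; iowait_starve_clear() at line 917 then records whether this pass actually made progress.
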