Lines matching refs:csk — each entry gives the source line number, the matching code, and the enclosing function.

469 		     struct cxgbit_sock *csk)  in cxgbit_set_conn_info()  argument
472 conn->login_sockaddr = csk->com.remote_addr; in cxgbit_set_conn_info()
473 conn->local_sockaddr = csk->com.local_addr; in cxgbit_set_conn_info()
479 struct cxgbit_sock *csk; in cxgbit_accept_np() local
504 csk = list_first_entry(&cnp->np_accept_list, in cxgbit_accept_np()
508 list_del_init(&csk->accept_node); in cxgbit_accept_np()
510 conn->context = csk; in cxgbit_accept_np()
511 csk->conn = conn; in cxgbit_accept_np()
513 cxgbit_set_conn_info(np, conn, csk); in cxgbit_accept_np()
598 static void __cxgbit_free_conn(struct cxgbit_sock *csk);
603 struct cxgbit_sock *csk, *tmp; in cxgbit_free_np() local
612 list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) { in cxgbit_free_np()
613 list_del_init(&csk->accept_node); in cxgbit_free_np()
614 __cxgbit_free_conn(csk); in cxgbit_free_np()
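
The refs at 612-614 show listener teardown walking the accept list with list_for_each_entry_safe(), which caches the next node so the current one can be unlinked and freed mid-iteration. A minimal userspace sketch of the same drain pattern, with hypothetical types (the driver itself uses the kernel's doubly linked lists and __cxgbit_free_conn()):

#include <stdio.h>
#include <stdlib.h>

struct conn {
	int tid;
	struct conn *next;
};

static void drain_accept_list(struct conn **head)
{
	struct conn *c = *head, *tmp;

	while (c) {
		tmp = c->next;   /* cache the successor before freeing */
		printf("freeing conn tid %d\n", c->tid);
		free(c);         /* stands in for __cxgbit_free_conn() */
		c = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct conn *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));

		c->tid = i;
		c->next = head;
		head = c;
	}
	drain_accept_list(&head);
	return 0;
}
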
622 static void cxgbit_send_halfclose(struct cxgbit_sock *csk) in cxgbit_send_halfclose() argument
631 cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_halfclose()
635 __skb_queue_tail(&csk->txq, skb); in cxgbit_send_halfclose()
636 cxgbit_push_tx_frames(csk); in cxgbit_send_halfclose()
641 struct cxgbit_sock *csk = handle; in cxgbit_arp_failure_discard() local
645 cxgbit_put_csk(csk); in cxgbit_arp_failure_discard()
658 static int cxgbit_send_abort_req(struct cxgbit_sock *csk) in cxgbit_send_abort_req() argument
664 __func__, csk, csk->tid, csk->com.state); in cxgbit_send_abort_req()
666 __skb_queue_purge(&csk->txq); in cxgbit_send_abort_req()
668 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) in cxgbit_send_abort_req()
669 cxgbit_send_tx_flowc_wr(csk); in cxgbit_send_abort_req()
671 skb = __skb_dequeue(&csk->skbq); in cxgbit_send_abort_req()
672 cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_abort_req()
673 csk->com.cdev, cxgbit_abort_arp_failure); in cxgbit_send_abort_req()
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); in cxgbit_send_abort_req()
679 __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb) in __cxgbit_abort_conn() argument
683 if (csk->com.state != CSK_STATE_ESTABLISHED) in __cxgbit_abort_conn()
686 set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags); in __cxgbit_abort_conn()
687 csk->com.state = CSK_STATE_ABORTING; in __cxgbit_abort_conn()
689 cxgbit_send_abort_req(csk); in __cxgbit_abort_conn()
694 cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE); in __cxgbit_abort_conn()
695 cxgbit_put_csk(csk); in __cxgbit_abort_conn()
698 void cxgbit_abort_conn(struct cxgbit_sock *csk) in cxgbit_abort_conn() argument
702 cxgbit_get_csk(csk); in cxgbit_abort_conn()
703 cxgbit_init_wr_wait(&csk->com.wr_wait); in cxgbit_abort_conn()
705 spin_lock_bh(&csk->lock); in cxgbit_abort_conn()
706 if (csk->lock_owner) { in cxgbit_abort_conn()
708 __skb_queue_tail(&csk->backlogq, skb); in cxgbit_abort_conn()
710 __cxgbit_abort_conn(csk, skb); in cxgbit_abort_conn()
712 spin_unlock_bh(&csk->lock); in cxgbit_abort_conn()
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait, in cxgbit_abort_conn()
715 csk->tid, 600, __func__); in cxgbit_abort_conn()
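
Refs 702-715 (and the same shape again at 1078-1087) show the driver's deferral idiom: the caller takes csk->lock, and if another context currently owns the socket (csk->lock_owner), the request is appended to csk->backlogq for the owner to replay rather than processed inline; the abort path additionally takes a reference and arms wr_wait first. A compact pthread sketch of that decision, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work {
	void (*fn)(void);
	struct work *next;
};

struct sock {
	pthread_mutex_t lock;
	bool lock_owner;       /* true while a processing context owns the socket */
	struct work *backlogq; /* deferred work, replayed when the owner finishes */
};

static void submit(struct sock *sk, struct work *w)
{
	pthread_mutex_lock(&sk->lock);
	if (sk->lock_owner) {
		struct work **p = &sk->backlogq;

		while (*p)
			p = &(*p)->next;
		w->next = NULL;
		*p = w;        /* FIFO append, like __skb_queue_tail() */
	} else {
		w->fn();       /* nobody owns the socket: process inline */
	}
	pthread_mutex_unlock(&sk->lock);
}

static void process(void)
{
	puts("processed inline");
}

int main(void)
{
	struct sock sk = { PTHREAD_MUTEX_INITIALIZER, false, NULL };
	struct work w = { process, NULL };

	submit(&sk, &w);
	return 0;
}
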
718 static void __cxgbit_free_conn(struct cxgbit_sock *csk) in __cxgbit_free_conn() argument
720 struct iscsit_conn *conn = csk->conn; in __cxgbit_free_conn()
724 __func__, csk->com.state); in __cxgbit_free_conn()
726 spin_lock_bh(&csk->lock); in __cxgbit_free_conn()
727 switch (csk->com.state) { in __cxgbit_free_conn()
730 csk->com.state = CSK_STATE_CLOSING; in __cxgbit_free_conn()
731 cxgbit_send_halfclose(csk); in __cxgbit_free_conn()
733 csk->com.state = CSK_STATE_ABORTING; in __cxgbit_free_conn()
734 cxgbit_send_abort_req(csk); in __cxgbit_free_conn()
738 csk->com.state = CSK_STATE_MORIBUND; in __cxgbit_free_conn()
739 cxgbit_send_halfclose(csk); in __cxgbit_free_conn()
746 __func__, csk, csk->com.state); in __cxgbit_free_conn()
748 spin_unlock_bh(&csk->lock); in __cxgbit_free_conn()
751 cxgbit_put_csk(csk); in __cxgbit_free_conn()
759 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) in cxgbit_set_emss() argument
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - in cxgbit_set_emss()
762 ((csk->com.remote_addr.ss_family == AF_INET) ? in cxgbit_set_emss()
765 csk->mss = csk->emss; in cxgbit_set_emss()
767 csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4); in cxgbit_set_emss()
768 if (csk->emss < 128) in cxgbit_set_emss()
769 csk->emss = 128; in cxgbit_set_emss()
770 if (csk->emss & 7) in cxgbit_set_emss()
772 TCPOPT_MSS_G(opt), csk->mss, csk->emss); in cxgbit_set_emss()
774 csk->mss, csk->emss); in cxgbit_set_emss()
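
Refs 761-772 compute the effective MSS: the MTU selected via the peer's MSS option indexes the adapter's MTU table, IP and TCP header overhead is subtracted, the rounded-up TCP timestamp option is subtracted when negotiated, and the result is clamped to a floor of 128 bytes (with a warning at ref 770 if it ends up misaligned). A userspace model of the arithmetic with assumed header sizes:

#include <stdio.h>
#include <stdbool.h>

#define IP4_HDR 20
#define IP6_HDR 40
#define TCP_HDR 20
#define TS_OPT  12	/* TCPOLEN_TIMESTAMP (10) rounded up to 4 */

static unsigned int emss(unsigned int mtu, bool ipv6, bool tstamp)
{
	unsigned int e = mtu - (ipv6 ? IP6_HDR : IP4_HDR) - TCP_HDR;

	if (tstamp)
		e -= TS_OPT;
	if (e < 128)
		e = 128;	/* floor, as in cxgbit_set_emss() */
	return e;
}

int main(void)
{
	printf("ipv4 + timestamps: %u\n", emss(1500, false, true));  /* 1448 */
	printf("ipv6, no options:  %u\n", emss(1500, true, false));  /* 1440 */
	return 0;
}
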
777 static void cxgbit_free_skb(struct cxgbit_sock *csk) in cxgbit_free_skb() argument
781 __skb_queue_purge(&csk->txq); in cxgbit_free_skb()
782 __skb_queue_purge(&csk->rxq); in cxgbit_free_skb()
783 __skb_queue_purge(&csk->backlogq); in cxgbit_free_skb()
784 __skb_queue_purge(&csk->ppodq); in cxgbit_free_skb()
785 __skb_queue_purge(&csk->skbq); in cxgbit_free_skb()
787 while ((skb = cxgbit_sock_dequeue_wr(csk))) in cxgbit_free_skb()
790 __kfree_skb(csk->lro_hskb); in cxgbit_free_skb()
795 struct cxgbit_sock *csk; in _cxgbit_free_csk() local
798 csk = container_of(kref, struct cxgbit_sock, kref); in _cxgbit_free_csk()
800 pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state); in _cxgbit_free_csk()
802 if (csk->com.local_addr.ss_family == AF_INET6) { in _cxgbit_free_csk()
804 &csk->com.local_addr; in _cxgbit_free_csk()
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0], in _cxgbit_free_csk()
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid, in _cxgbit_free_csk()
811 csk->com.local_addr.ss_family); in _cxgbit_free_csk()
812 dst_release(csk->dst); in _cxgbit_free_csk()
813 cxgb4_l2t_release(csk->l2t); in _cxgbit_free_csk()
815 cdev = csk->com.cdev; in _cxgbit_free_csk()
817 list_del(&csk->list); in _cxgbit_free_csk()
820 cxgbit_free_skb(csk); in _cxgbit_free_csk()
821 cxgbit_put_cnp(csk->cnp); in _cxgbit_free_csk()
824 kfree(csk); in _cxgbit_free_csk()
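
Refs 795-824 show the release side of the socket's reference counting: _cxgbit_free_csk() runs only when the last reference drops, recovers the cxgbit_sock from the embedded kref with container_of(), tears down the tid, dst, and L2T state, and frees the structure. A userspace model of the kref/container_of pattern (the kernel's kref is atomic; this sketch is not, and the teardown is elided):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };

struct csk {
	int tid;
	struct kref kref;
};

static void csk_release(struct kref *kref)
{
	struct csk *csk = container_of(kref, struct csk, kref);

	printf("releasing csk tid %d\n", csk->tid);
	free(csk);	/* tid/dst/l2t teardown would go here */
}

static void kref_put(struct kref *kref, void (*release)(struct kref *))
{
	if (--kref->refcount == 0)
		release(kref);
}

int main(void)
{
	struct csk *csk = malloc(sizeof(*csk));

	csk->tid = 42;
	csk->kref.refcount = 2;	/* e.g. lookup ref + owner ref */
	kref_put(&csk->kref, csk_release);
	kref_put(&csk->kref, csk_release);	/* last put frees */
	return 0;
}
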
827 static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) in cxgbit_set_tcp_window() argument
836 csk->rcv_win = CXGBIT_10G_RCV_WIN; in cxgbit_set_tcp_window()
838 csk->rcv_win *= scale; in cxgbit_set_tcp_window()
839 csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10); in cxgbit_set_tcp_window()
842 csk->snd_win = CXGBIT_10G_SND_WIN; in cxgbit_set_tcp_window()
844 csk->snd_win *= scale; in cxgbit_set_tcp_window()
845 csk->snd_win = min(csk->snd_win, 512U * 1024); in cxgbit_set_tcp_window()
848 __func__, csk->snd_win, csk->rcv_win); in cxgbit_set_tcp_window()
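
Refs 836-848 size the TCP windows from a 10G baseline scaled by link speed, then clamp: rcv_win to the hardware receive-buffer limit (RCV_BUFSIZ_M << 10) and snd_win to 512 KB. A sketch of the arithmetic with assumed baseline values (CXGBIT_10G_RCV_WIN and CXGBIT_10G_SND_WIN are the driver's real constants; the numbers below are illustrative):

#include <stdio.h>

#define BASE_RCV_WIN (256 * 1024)	/* assumed 10G baseline */
#define BASE_SND_WIN (128 * 1024)	/* assumed 10G baseline */
#define MAX_RCV_WIN  (1024 * 1024)	/* stands in for RCV_BUFSIZ_M << 10 */
#define MAX_SND_WIN  (512 * 1024)	/* driver clamps snd_win to 512 KB */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int scale = 4;	/* link speed / 10G, e.g. a 40G port */
	unsigned int rcv_win = min_u(BASE_RCV_WIN * scale, MAX_RCV_WIN);
	unsigned int snd_win = min_u(BASE_SND_WIN * scale, MAX_SND_WIN);

	printf("snd_win %u rcv_win %u\n", snd_win, rcv_win);
	return 0;
}
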
899 cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, in cxgbit_offload_init() argument
933 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, in cxgbit_offload_init()
935 if (!csk->l2t) in cxgbit_offload_init()
937 csk->mtu = ndev->mtu; in cxgbit_offload_init()
938 csk->tx_chan = cxgb4_port_chan(ndev); in cxgbit_offload_init()
939 csk->smac_idx = in cxgbit_offload_init()
943 csk->txq_idx = cxgb4_port_idx(ndev) * step; in cxgbit_offload_init()
946 csk->ctrlq_idx = cxgb4_port_idx(ndev); in cxgbit_offload_init()
947 csk->rss_qid = cdev->lldi.rxq_ids[ in cxgbit_offload_init()
949 csk->port_id = cxgb4_port_idx(ndev); in cxgbit_offload_init()
950 cxgbit_set_tcp_window(csk, in cxgbit_offload_init()
964 csk->dcb_priority = priority; in cxgbit_offload_init()
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); in cxgbit_offload_init()
968 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); in cxgbit_offload_init()
970 if (!csk->l2t) in cxgbit_offload_init()
973 csk->mtu = dst_mtu(dst); in cxgbit_offload_init()
974 csk->tx_chan = cxgb4_port_chan(ndev); in cxgbit_offload_init()
975 csk->smac_idx = in cxgbit_offload_init()
979 csk->txq_idx = (port_id * step) + in cxgbit_offload_init()
981 csk->ctrlq_idx = cxgb4_port_idx(ndev); in cxgbit_offload_init()
986 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx]; in cxgbit_offload_init()
987 csk->port_id = port_id; in cxgbit_offload_init()
988 cxgbit_set_tcp_window(csk, in cxgbit_offload_init()
1045 static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_send_rx_credits() argument
1047 if (csk->com.state != CSK_STATE_ESTABLISHED) { in cxgbit_send_rx_credits()
1052 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_rx_credits()
1060 int cxgbit_rx_data_ack(struct cxgbit_sock *csk) in cxgbit_rx_data_ack() argument
1071 RX_CREDITS_V(csk->rx_credits); in cxgbit_rx_data_ack()
1073 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, in cxgbit_rx_data_ack()
1076 csk->rx_credits = 0; in cxgbit_rx_data_ack()
1078 spin_lock_bh(&csk->lock); in cxgbit_rx_data_ack()
1079 if (csk->lock_owner) { in cxgbit_rx_data_ack()
1081 __skb_queue_tail(&csk->backlogq, skb); in cxgbit_rx_data_ack()
1082 spin_unlock_bh(&csk->lock); in cxgbit_rx_data_ack()
1086 cxgbit_send_rx_credits(csk, skb); in cxgbit_rx_data_ack()
1087 spin_unlock_bh(&csk->lock); in cxgbit_rx_data_ack()
1094 static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk) in cxgbit_alloc_csk_skb() argument
1113 __skb_queue_tail(&csk->skbq, skb); in cxgbit_alloc_csk_skb()
1121 csk->lro_hskb = skb; in cxgbit_alloc_csk_skb()
1125 __skb_queue_purge(&csk->skbq); in cxgbit_alloc_csk_skb()
1130 cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) in cxgbit_pass_accept_rpl() argument
1135 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; in cxgbit_pass_accept_rpl()
1143 pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid); in cxgbit_pass_accept_rpl()
1147 cxgbit_put_csk(csk); in cxgbit_pass_accept_rpl()
1153 INIT_TP_WR(rpl5, csk->tid); in cxgbit_pass_accept_rpl()
1155 csk->tid)); in cxgbit_pass_accept_rpl()
1156 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx, in cxgbit_pass_accept_rpl()
1158 (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1); in cxgbit_pass_accept_rpl()
1159 wscale = cxgb_compute_wscale(csk->rcv_win); in cxgbit_pass_accept_rpl()
1164 win = csk->rcv_win >> 10; in cxgbit_pass_accept_rpl()
1170 L2T_IDX_V(csk->l2t->idx) | in cxgbit_pass_accept_rpl()
1171 TX_CHAN_V(csk->tx_chan) | in cxgbit_pass_accept_rpl()
1172 SMAC_SEL_V(csk->smac_idx) | in cxgbit_pass_accept_rpl()
1173 DSCP_V(csk->tos >> 2) | in cxgbit_pass_accept_rpl()
1178 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); in cxgbit_pass_accept_rpl()
1211 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); in cxgbit_pass_accept_rpl()
1212 t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard); in cxgbit_pass_accept_rpl()
1213 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); in cxgbit_pass_accept_rpl()
1219 struct cxgbit_sock *csk = NULL; in cxgbit_pass_accept_req() local
1250 csk = lookup_tid(t, tid); in cxgbit_pass_accept_req()
1251 if (csk) { in cxgbit_pass_accept_req()
1291 csk = kzalloc(sizeof(*csk), GFP_ATOMIC); in cxgbit_pass_accept_req()
1292 if (!csk) { in cxgbit_pass_accept_req()
1297 ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port), in cxgbit_pass_accept_req()
1303 kfree(csk); in cxgbit_pass_accept_req()
1307 kref_init(&csk->kref); in cxgbit_pass_accept_req()
1308 init_completion(&csk->com.wr_wait.completion); in cxgbit_pass_accept_req()
1310 INIT_LIST_HEAD(&csk->accept_node); in cxgbit_pass_accept_req()
1314 if (peer_mss && csk->mtu > (peer_mss + hdrs)) in cxgbit_pass_accept_req()
1315 csk->mtu = peer_mss + hdrs; in cxgbit_pass_accept_req()
1317 csk->com.state = CSK_STATE_CONNECTING; in cxgbit_pass_accept_req()
1318 csk->com.cdev = cdev; in cxgbit_pass_accept_req()
1319 csk->cnp = cnp; in cxgbit_pass_accept_req()
1320 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); in cxgbit_pass_accept_req()
1321 csk->dst = dst; in cxgbit_pass_accept_req()
1322 csk->tid = tid; in cxgbit_pass_accept_req()
1323 csk->wr_cred = cdev->lldi.wr_cred - in cxgbit_pass_accept_req()
1325 csk->wr_max_cred = csk->wr_cred; in cxgbit_pass_accept_req()
1326 csk->wr_una_cred = 0; in cxgbit_pass_accept_req()
1330 &csk->com.local_addr; in cxgbit_pass_accept_req()
1335 sin = (struct sockaddr_in *)&csk->com.remote_addr; in cxgbit_pass_accept_req()
1341 &csk->com.local_addr; in cxgbit_pass_accept_req()
1350 sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr; in cxgbit_pass_accept_req()
1356 skb_queue_head_init(&csk->rxq); in cxgbit_pass_accept_req()
1357 skb_queue_head_init(&csk->txq); in cxgbit_pass_accept_req()
1358 skb_queue_head_init(&csk->ppodq); in cxgbit_pass_accept_req()
1359 skb_queue_head_init(&csk->backlogq); in cxgbit_pass_accept_req()
1360 skb_queue_head_init(&csk->skbq); in cxgbit_pass_accept_req()
1361 cxgbit_sock_reset_wr_list(csk); in cxgbit_pass_accept_req()
1362 spin_lock_init(&csk->lock); in cxgbit_pass_accept_req()
1363 init_waitqueue_head(&csk->waitq); in cxgbit_pass_accept_req()
1364 csk->lock_owner = false; in cxgbit_pass_accept_req()
1366 if (cxgbit_alloc_csk_skb(csk)) { in cxgbit_pass_accept_req()
1368 kfree(csk); in cxgbit_pass_accept_req()
1376 list_add_tail(&csk->list, &cdev->cskq.list); in cxgbit_pass_accept_req()
1378 cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family); in cxgbit_pass_accept_req()
1379 cxgbit_pass_accept_rpl(csk, req); in cxgbit_pass_accept_req()
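
cxgbit_pass_accept_req() (refs 1219-1379) allocates and initializes the socket, then publishes it in the adapter's TID table at ref 1378 so that later CPL messages (the lookup_tid() calls at refs 1250, 1634, 1905, 1926, 1990) can recover the owning socket in O(1) from the hardware tag. A minimal model of that demultiplexing table, with illustrative names and bounds:

#include <stdio.h>
#include <stdlib.h>

#define MAX_TIDS 64

struct csk { int tid; };

static struct csk *tid_tab[MAX_TIDS];

static void insert_tid(struct csk *csk, unsigned int tid)
{
	csk->tid = tid;
	tid_tab[tid] = csk;
}

static struct csk *lookup_tid(unsigned int tid)
{
	return tid < MAX_TIDS ? tid_tab[tid] : NULL;
}

int main(void)
{
	struct csk *csk = malloc(sizeof(*csk));

	insert_tid(csk, 7);
	printf("tid 7 -> %p\n", (void *)lookup_tid(7));
	free(csk);
	return 0;
}
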
1389 cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp, in cxgbit_tx_flowc_wr_credits() argument
1396 if (csk->snd_wscale) in cxgbit_tx_flowc_wr_credits()
1416 u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk) in cxgbit_send_tx_flowc_wr() argument
1418 struct cxgbit_device *cdev = csk->com.cdev; in cxgbit_send_tx_flowc_wr()
1425 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; in cxgbit_send_tx_flowc_wr()
1428 flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen); in cxgbit_send_tx_flowc_wr()
1430 skb = __skb_dequeue(&csk->skbq); in cxgbit_send_tx_flowc_wr()
1436 FW_WR_FLOWID_V(csk->tid)); in cxgbit_send_tx_flowc_wr()
1439 (csk->com.cdev->lldi.pf)); in cxgbit_send_tx_flowc_wr()
1441 flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan); in cxgbit_send_tx_flowc_wr()
1443 flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan); in cxgbit_send_tx_flowc_wr()
1445 flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid); in cxgbit_send_tx_flowc_wr()
1447 flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt); in cxgbit_send_tx_flowc_wr()
1449 flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt); in cxgbit_send_tx_flowc_wr()
1451 flowc->mnemval[6].val = cpu_to_be32(csk->snd_win); in cxgbit_send_tx_flowc_wr()
1453 flowc->mnemval[7].val = cpu_to_be32(csk->emss); in cxgbit_send_tx_flowc_wr()
1463 if (csk->snd_wscale) { in cxgbit_send_tx_flowc_wr()
1465 flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale); in cxgbit_send_tx_flowc_wr()
1472 pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid); in cxgbit_send_tx_flowc_wr()
1481 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt, in cxgbit_send_tx_flowc_wr()
1482 csk->rcv_nxt, csk->snd_win, csk->emss); in cxgbit_send_tx_flowc_wr()
1483 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); in cxgbit_send_tx_flowc_wr()
1484 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_tx_flowc_wr()
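
Refs 1389-1484 build the FLOWC work request that primes firmware with the connection's channel, queue, sequence numbers, windows, and eMSS (mnemval[0..7], plus optional send-window-scale and DCB VLAN entries). Its cost is charged in 16-byte credits: cxgbit_tx_flowc_wr_credits() derives the byte length from the parameter count and rounds up. A sketch of that sizing arithmetic with assumed structure sizes (not the firmware ABI):

#include <stdio.h>

#define FLOWC_HDR 16	/* assumed header size */
#define MNEM_VAL  8	/* assumed per-parameter size */

static unsigned int flowc_credits(unsigned int nparams, unsigned int *flowclenp)
{
	unsigned int flowclen = FLOWC_HDR + nparams * MNEM_VAL;

	*flowclenp = flowclen;
	return (flowclen + 15) / 16;	/* DIV_ROUND_UP to 16-byte credits */
}

int main(void)
{
	unsigned int len, cr = flowc_credits(9, &len);

	printf("%u bytes -> %u credits\n", len, cr);
	return 0;
}
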
1489 cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_send_tcb_skb() argument
1491 spin_lock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1492 if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) { in cxgbit_send_tcb_skb()
1493 spin_unlock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1495 __func__, csk, csk->tid, csk->com.state); in cxgbit_send_tcb_skb()
1500 cxgbit_get_csk(csk); in cxgbit_send_tcb_skb()
1501 cxgbit_init_wr_wait(&csk->com.wr_wait); in cxgbit_send_tcb_skb()
1502 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_tcb_skb()
1503 spin_unlock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1508 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk) in cxgbit_setup_conn_digest() argument
1512 u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC; in cxgbit_setup_conn_digest()
1513 u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC; in cxgbit_setup_conn_digest()
1524 INIT_TP_WR(req, csk->tid); in cxgbit_setup_conn_digest()
1525 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in cxgbit_setup_conn_digest()
1526 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in cxgbit_setup_conn_digest()
1531 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); in cxgbit_setup_conn_digest()
1533 if (cxgbit_send_tcb_skb(csk, skb)) in cxgbit_setup_conn_digest()
1536 ret = cxgbit_wait_for_reply(csk->com.cdev, in cxgbit_setup_conn_digest()
1537 &csk->com.wr_wait, in cxgbit_setup_conn_digest()
1538 csk->tid, 5, __func__); in cxgbit_setup_conn_digest()
1545 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx) in cxgbit_setup_conn_pgidx() argument
1558 INIT_TP_WR(req, csk->tid); in cxgbit_setup_conn_pgidx()
1559 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in cxgbit_setup_conn_pgidx()
1560 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in cxgbit_setup_conn_pgidx()
1564 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); in cxgbit_setup_conn_pgidx()
1566 if (cxgbit_send_tcb_skb(csk, skb)) in cxgbit_setup_conn_pgidx()
1569 ret = cxgbit_wait_for_reply(csk->com.cdev, in cxgbit_setup_conn_pgidx()
1570 &csk->com.wr_wait, in cxgbit_setup_conn_pgidx()
1571 csk->tid, 5, __func__); in cxgbit_setup_conn_pgidx()
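
Refs 1489-1543 and 1545-1571 follow the same synchronous pattern for SET_TCB_FIELD updates: take a reference, arm csk->com.wr_wait, send the control message, and block in cxgbit_wait_for_reply() (5-second timeout) until cxgbit_set_tcb_rpl() wakes the waiter with the CPL status. A pthread-based stand-in for that completion handshake (the kernel uses completions with a timeout; timeout handling is elided here, and all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	int status;
};

static void wr_wait_init(struct wr_wait *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->done = false;
}

static void wr_wake(struct wr_wait *w, int status)	/* reply path */
{
	pthread_mutex_lock(&w->lock);
	w->status = status;
	w->done = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

static int wr_wait_reply(struct wr_wait *w)		/* sender blocks */
{
	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);	/* timeout elided */
	pthread_mutex_unlock(&w->lock);
	return w->status;
}

int main(void)
{
	struct wr_wait w;

	wr_wait_init(&w);
	wr_wake(&w, 0);	/* pretend the CPL reply already arrived */
	printf("status %d\n", wr_wait_reply(&w));
	return 0;
}
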
1628 struct cxgbit_sock *csk; in cxgbit_pass_establish() local
1634 csk = lookup_tid(t, tid); in cxgbit_pass_establish()
1635 if (unlikely(!csk)) { in cxgbit_pass_establish()
1639 cnp = csk->cnp; in cxgbit_pass_establish()
1642 __func__, csk, tid, cnp); in cxgbit_pass_establish()
1644 csk->write_seq = snd_isn; in cxgbit_pass_establish()
1645 csk->snd_una = snd_isn; in cxgbit_pass_establish()
1646 csk->snd_nxt = snd_isn; in cxgbit_pass_establish()
1648 csk->rcv_nxt = rcv_isn; in cxgbit_pass_establish()
1650 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); in cxgbit_pass_establish()
1651 cxgbit_set_emss(csk, tcp_opt); in cxgbit_pass_establish()
1652 dst_confirm(csk->dst); in cxgbit_pass_establish()
1653 csk->com.state = CSK_STATE_ESTABLISHED; in cxgbit_pass_establish()
1655 list_add_tail(&csk->accept_node, &cnp->np_accept_list); in cxgbit_pass_establish()
1662 static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_queue_rx_skb() argument
1665 spin_lock_bh(&csk->rxq.lock); in cxgbit_queue_rx_skb()
1666 __skb_queue_tail(&csk->rxq, skb); in cxgbit_queue_rx_skb()
1667 spin_unlock_bh(&csk->rxq.lock); in cxgbit_queue_rx_skb()
1668 wake_up(&csk->waitq); in cxgbit_queue_rx_skb()
1671 static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_peer_close() argument
1674 __func__, csk, csk->tid, csk->com.state); in cxgbit_peer_close()
1676 switch (csk->com.state) { in cxgbit_peer_close()
1678 csk->com.state = CSK_STATE_CLOSING; in cxgbit_peer_close()
1679 cxgbit_queue_rx_skb(csk, skb); in cxgbit_peer_close()
1683 csk->com.state = CSK_STATE_MORIBUND; in cxgbit_peer_close()
1686 csk->com.state = CSK_STATE_DEAD; in cxgbit_peer_close()
1687 cxgbit_put_csk(csk); in cxgbit_peer_close()
1693 __func__, csk->com.state); in cxgbit_peer_close()
1699 static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_close_con_rpl() argument
1702 __func__, csk, csk->tid, csk->com.state); in cxgbit_close_con_rpl()
1704 switch (csk->com.state) { in cxgbit_close_con_rpl()
1706 csk->com.state = CSK_STATE_MORIBUND; in cxgbit_close_con_rpl()
1709 csk->com.state = CSK_STATE_DEAD; in cxgbit_close_con_rpl()
1710 cxgbit_put_csk(csk); in cxgbit_close_con_rpl()
1717 __func__, csk->com.state); in cxgbit_close_con_rpl()
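
Refs 1671-1717 trace the four-way-close state machine: a peer FIN (CPL_PEER_CLOSE) and the ack of our own FIN (CPL_CLOSE_CON_RPL) each advance csk->com.state from ESTABLISHED through CLOSING and MORIBUND to DEAD, where the final socket reference is dropped (refs 1687, 1710). __cxgbit_free_conn (refs 726-739) enters the same machine from the local side, half-closing from ESTABLISHED/CLOSING and aborting otherwise. The transitions, distilled into a runnable sketch:

#include <stdio.h>

enum csk_state {
	CSK_STATE_ESTABLISHED,
	CSK_STATE_CLOSING,	/* one direction shut down */
	CSK_STATE_MORIBUND,	/* both directions shut down */
	CSK_STATE_DEAD,
};

/* Peer's FIN arrived (CPL_PEER_CLOSE). */
static enum csk_state peer_close(enum csk_state s)
{
	switch (s) {
	case CSK_STATE_ESTABLISHED:
		return CSK_STATE_CLOSING;
	case CSK_STATE_CLOSING:
		return CSK_STATE_MORIBUND;
	case CSK_STATE_MORIBUND:
		return CSK_STATE_DEAD;	/* final put of the csk reference */
	default:
		return s;
	}
}

/* Our FIN was acked (CPL_CLOSE_CON_RPL). */
static enum csk_state close_con_rpl(enum csk_state s)
{
	switch (s) {
	case CSK_STATE_CLOSING:
		return CSK_STATE_MORIBUND;
	case CSK_STATE_MORIBUND:
		return CSK_STATE_DEAD;
	default:
		return s;
	}
}

int main(void)
{
	enum csk_state s = CSK_STATE_ESTABLISHED;

	s = peer_close(s);	/* peer FIN:    ESTABLISHED -> CLOSING */
	s = close_con_rpl(s);	/* our FIN ack: CLOSING -> MORIBUND */
	printf("state %d (MORIBUND == %d)\n", s, CSK_STATE_MORIBUND);
	return 0;
}
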
1723 static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_abort_req_rss() argument
1733 __func__, csk, tid, csk->com.state); in cxgbit_abort_req_rss()
1741 switch (csk->com.state) { in cxgbit_abort_req_rss()
1744 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1748 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1752 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1753 if (!csk->conn) in cxgbit_abort_req_rss()
1760 __func__, csk->com.state); in cxgbit_abort_req_rss()
1761 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1764 __skb_queue_purge(&csk->txq); in cxgbit_abort_req_rss()
1766 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) in cxgbit_abort_req_rss()
1767 cxgbit_send_tx_flowc_wr(csk); in cxgbit_abort_req_rss()
1769 rpl_skb = __skb_dequeue(&csk->skbq); in cxgbit_abort_req_rss()
1771 cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx); in cxgbit_abort_req_rss()
1772 cxgbit_ofld_send(csk->com.cdev, rpl_skb); in cxgbit_abort_req_rss()
1775 cxgbit_queue_rx_skb(csk, skb); in cxgbit_abort_req_rss()
1780 cxgbit_put_csk(csk); in cxgbit_abort_req_rss()
1785 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_abort_rpl_rss() argument
1790 __func__, csk, csk->tid, csk->com.state); in cxgbit_abort_rpl_rss()
1792 switch (csk->com.state) { in cxgbit_abort_rpl_rss()
1794 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_rpl_rss()
1795 if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags)) in cxgbit_abort_rpl_rss()
1796 cxgbit_wake_up(&csk->com.wr_wait, __func__, in cxgbit_abort_rpl_rss()
1798 cxgbit_put_csk(csk); in cxgbit_abort_rpl_rss()
1802 __func__, csk->com.state); in cxgbit_abort_rpl_rss()
1808 static bool cxgbit_credit_err(const struct cxgbit_sock *csk) in cxgbit_credit_err() argument
1810 const struct sk_buff *skb = csk->wr_pending_head; in cxgbit_credit_err()
1813 if (unlikely(csk->wr_cred > csk->wr_max_cred)) { in cxgbit_credit_err()
1815 csk, csk->tid, csk->wr_cred, csk->wr_max_cred); in cxgbit_credit_err()
1824 if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) { in cxgbit_credit_err()
1826 csk, csk->tid, csk->wr_cred, in cxgbit_credit_err()
1827 credit, csk->wr_max_cred); in cxgbit_credit_err()
1835 static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_fw4_ack() argument
1841 csk->wr_cred += credits; in cxgbit_fw4_ack()
1842 if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred)) in cxgbit_fw4_ack()
1843 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; in cxgbit_fw4_ack()
1846 struct sk_buff *p = cxgbit_sock_peek_wr(csk); in cxgbit_fw4_ack()
1851 csk, csk->tid, credits, in cxgbit_fw4_ack()
1852 csk->wr_cred, csk->wr_una_cred); in cxgbit_fw4_ack()
1859 csk, csk->tid, in cxgbit_fw4_ack()
1860 credits, csk->wr_cred, csk->wr_una_cred, in cxgbit_fw4_ack()
1866 cxgbit_sock_dequeue_wr(csk); in cxgbit_fw4_ack()
1871 if (unlikely(cxgbit_credit_err(csk))) { in cxgbit_fw4_ack()
1872 cxgbit_queue_rx_skb(csk, skb); in cxgbit_fw4_ack()
1877 if (unlikely(before(snd_una, csk->snd_una))) { in cxgbit_fw4_ack()
1879 csk, csk->tid, snd_una, in cxgbit_fw4_ack()
1880 csk->snd_una); in cxgbit_fw4_ack()
1884 if (csk->snd_una != snd_una) { in cxgbit_fw4_ack()
1885 csk->snd_una = snd_una; in cxgbit_fw4_ack()
1886 dst_confirm(csk->dst); in cxgbit_fw4_ack()
1890 if (skb_queue_len(&csk->txq)) in cxgbit_fw4_ack()
1891 cxgbit_push_tx_frames(csk); in cxgbit_fw4_ack()
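
Refs 1835-1891 handle CPL_FW4_ACK: the returned credits are added back to csk->wr_cred, fully acked work requests are popped from the head of the in-flight list (each skb recorded its credit cost at send time), a partially acked head WR keeps its remaining cost, csk->snd_una advances, and the tx queue is kicked again; cxgbit_credit_err() (refs 1808-1827) is the sanity check that the books balance. A small model of that credit bookkeeping, using an illustrative fixed-size queue:

#include <stdio.h>

struct pending {
	unsigned int credits[8];	/* per-WR credit cost, send order */
	unsigned int head, tail;
};

static void fw4_ack(struct pending *q, unsigned int credits,
		    unsigned int *wr_cred)
{
	*wr_cred += credits;	/* credits return to the sender's pool */

	while (credits && q->head != q->tail) {
		unsigned int cost = q->credits[q->head % 8];

		if (credits < cost) {
			/* partial ack of the head WR: keep it queued
			 * with its remaining cost */
			q->credits[q->head % 8] = cost - credits;
			credits = 0;
			break;
		}
		credits -= cost;
		q->head++;	/* WR fully acked: dequeue it */
	}
	if (credits)
		printf("credit overflow: %u unmatched\n", credits);
}

int main(void)
{
	struct pending q = { { 3, 2, 4 }, 0, 3 };
	unsigned int wr_cred = 0;

	fw4_ack(&q, 5, &wr_cred);	/* completes the first two WRs */
	printf("wr_cred %u, pending %u\n", wr_cred, q.tail - q.head);
	return 0;
}
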
1899 struct cxgbit_sock *csk; in cxgbit_set_tcb_rpl() local
1905 csk = lookup_tid(t, tid); in cxgbit_set_tcb_rpl()
1906 if (unlikely(!csk)) { in cxgbit_set_tcb_rpl()
1910 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); in cxgbit_set_tcb_rpl()
1913 cxgbit_put_csk(csk); in cxgbit_set_tcb_rpl()
1920 struct cxgbit_sock *csk; in cxgbit_rx_data() local
1926 csk = lookup_tid(t, tid); in cxgbit_rx_data()
1927 if (unlikely(!csk)) { in cxgbit_rx_data()
1932 cxgbit_queue_rx_skb(csk, skb); in cxgbit_rx_data()
1939 __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) in __cxgbit_process_rx_cpl() argument
1941 spin_lock(&csk->lock); in __cxgbit_process_rx_cpl()
1942 if (csk->lock_owner) { in __cxgbit_process_rx_cpl()
1943 __skb_queue_tail(&csk->backlogq, skb); in __cxgbit_process_rx_cpl()
1944 spin_unlock(&csk->lock); in __cxgbit_process_rx_cpl()
1948 cxgbit_skcb_rx_backlog_fn(skb)(csk, skb); in __cxgbit_process_rx_cpl()
1949 spin_unlock(&csk->lock); in __cxgbit_process_rx_cpl()
1952 static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_process_rx_cpl() argument
1954 cxgbit_get_csk(csk); in cxgbit_process_rx_cpl()
1955 __cxgbit_process_rx_cpl(csk, skb); in cxgbit_process_rx_cpl()
1956 cxgbit_put_csk(csk); in cxgbit_process_rx_cpl()
1961 struct cxgbit_sock *csk; in cxgbit_rx_cpl() local
1990 csk = lookup_tid(t, tid); in cxgbit_rx_cpl()
1991 if (unlikely(!csk)) { in cxgbit_rx_cpl()
1997 cxgbit_process_rx_cpl(csk, skb); in cxgbit_rx_cpl()
1999 __cxgbit_process_rx_cpl(csk, skb); in cxgbit_rx_cpl()