
Searched refs:csk (Results 1 – 21 of 21) sorted by relevance

/Linux-v4.19/drivers/scsi/cxgbi/cxgb4i/
cxgb4i.c
  in send_act_open_req() (csk: argument):
    189  static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
    192          struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
    193          int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
    196          unsigned int qid_atid = ((unsigned int)csk->atid) |
    197                  (((unsigned int)csk->rss_qid) << 14);
    201                  MSS_IDX_V(csk->mss_idx) |
    202                  L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
    203                  TX_CHAN_V(csk->tx_chan) |
    204                  SMAC_SEL_V(csk->smac_idx) |
    206                  RCV_BUFSIZ_V(csk->rcv_win >> 10);
  [all …]
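
Lines 196-197 pack the connection's ATID and RSS queue id into one 32-bit word, and line 206 passes the receive window in 1 KiB units. A standalone sketch of that packing, assuming the 14-bit ATID field implied by the shift; the macro and helper names below are illustrative, not taken from the driver headers:

    #include <assert.h>
    #include <stdio.h>

    #define ATID_BITS     14u                 /* inferred from the "<< 14" above */
    #define RSS_QID_SHIFT ATID_BITS

    static unsigned int pack_qid_atid(unsigned int atid, unsigned int rss_qid)
    {
            return atid | (rss_qid << RSS_QID_SHIFT);
    }

    int main(void)
    {
            unsigned int atid = 0x123, rss_qid = 0x2a;
            unsigned int qid_atid = pack_qid_atid(atid, rss_qid);

            /* Unpacking shows the two sub-fields occupy disjoint bits. */
            assert((qid_atid & ((1u << ATID_BITS) - 1)) == atid);
            assert((qid_atid >> RSS_QID_SHIFT) == rss_qid);

            /* rcv_win >> 10 expresses the receive window in 1 KiB units,
             * matching RCV_BUFSIZ_V(csk->rcv_win >> 10) in the snippet. */
            unsigned int rcv_win = 256 * 1024;
            printf("qid_atid=0x%x rcv_bufsiz=%u KiB\n", qid_atid, rcv_win >> 10);
            return 0;
    }
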
/Linux-v4.19/drivers/scsi/cxgbi/cxgb3i/
cxgb3i.c
    155  static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
  in send_act_open_req() (csk: argument):
    157  static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
    160          unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
    166          OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
    167          req->local_port = csk->saddr.sin_port;
    168          req->peer_port = csk->daddr.sin_port;
    169          req->local_ip = csk->saddr.sin_addr.s_addr;
    170          req->peer_ip = csk->daddr.sin_addr.s_addr;
    173                  V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
    176                  V_RCV_BUFSIZ(csk->rcv_win >> 10));
  [all …]
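
Line 160 derives a TCP window-scale factor from the receive window. cxgbi_sock_compute_wscale() itself is not part of the truncated listing, so the sketch below shows the conventional computation (shift until the window fits the 16-bit TCP window field, capped at the RFC 7323 maximum of 14); treat it as an assumption about the helper, not as the kernel source:

    #include <stdio.h>

    /* Conventional window-scale derivation; a sketch, not the driver's code. */
    static unsigned int compute_wscale(unsigned int rcv_win)
    {
            unsigned int wscale = 0;

            while (wscale < 14 && (65535u << wscale) < rcv_win)
                    wscale++;
            return wscale;
    }

    int main(void)
    {
            unsigned int wins[] = { 64 * 1024, 256 * 1024, 1024 * 1024 };

            for (unsigned int i = 0; i < 3; i++)
                    printf("rcv_win=%7u -> wscale=%u\n",
                           wins[i], compute_wscale(wins[i]));
            return 0;
    }
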
/Linux-v4.19/drivers/target/iscsi/cxgbit/
cxgbit_cm.c
  in cxgbit_set_conn_info() (csk: argument):
    472                          struct cxgbit_sock *csk)
    475          conn->login_sockaddr = csk->com.remote_addr;
    476          conn->local_sockaddr = csk->com.local_addr;
  in cxgbit_accept_np() (csk: local):
    482          struct cxgbit_sock *csk;
    507          csk = list_first_entry(&cnp->np_accept_list,
    511          list_del_init(&csk->accept_node);
    513          conn->context = csk;
    514          csk->conn = conn;
    516          cxgbit_set_conn_info(np, conn, csk);
  in cxgbit_send_halfclose() (csk: argument):
    615  static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
  [all …]
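
cxgbit_accept_np() pops the oldest pending connection off np_accept_list with list_first_entry() + list_del_init() and ties it to the iSCSI connection. The standalone sketch below reproduces that pop-first-and-attach flow with a plain singly linked queue; the struct and function names are illustrative stand-ins, not the driver's:

    #include <stdio.h>

    /* Illustrative stand-ins for struct cxgbit_sock / struct iscsi_conn. */
    struct pending_sock {
            int tid;
            struct pending_sock *next;
            void *conn;                       /* back-pointer set on accept */
    };

    struct accept_list {
            struct pending_sock *head, *tail;
    };

    /* Pop the oldest entry, as list_first_entry() + list_del_init() do. */
    static struct pending_sock *accept_pop(struct accept_list *l)
    {
            struct pending_sock *csk = l->head;

            if (!csk)
                    return NULL;
            l->head = csk->next;
            if (!l->head)
                    l->tail = NULL;
            csk->next = NULL;
            return csk;
    }

    static void accept_push(struct accept_list *l, struct pending_sock *csk)
    {
            csk->next = NULL;
            if (l->tail)
                    l->tail->next = csk;
            else
                    l->head = csk;
            l->tail = csk;
    }

    int main(void)
    {
            struct accept_list l = { 0 };
            struct pending_sock a = { .tid = 1 }, b = { .tid = 2 };
            int conn = 0;                     /* stand-in for the iSCSI conn */

            accept_push(&l, &a);
            accept_push(&l, &b);

            struct pending_sock *csk = accept_pop(&l);
            if (csk) {
                    csk->conn = &conn;        /* like csk->conn = conn; */
                    printf("accepted tid %d\n", csk->tid);
            }
            return 0;
    }
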
cxgbit_target.c
  in __cxgbit_alloc_skb() (csk: argument):
     30  __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
     49          submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
     63          submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
  in cxgbit_alloc_skb() (csk: argument):
     70  static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
     72          return __cxgbit_alloc_skb(csk, len, false);
  in cxgbit_tx_data_wr() (csk: argument):
    163  cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
    167          const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
    190          req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
    197                  FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
  in cxgbit_push_tx_frames() (csk: argument):
    205  void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
  [all …]
cxgbit_main.c
  in cxgbit_close_conn() (csk: local):
     93          struct cxgbit_sock *csk;
     98          list_for_each_entry(csk, &cdev->cskq.list, list) {
    103                  spin_lock_bh(&csk->rxq.lock);
    104                  __skb_queue_tail(&csk->rxq, skb);
    105                  if (skb_queue_len(&csk->rxq) == 1)
    107                  spin_unlock_bh(&csk->rxq.lock);
    110                  wake_up(&csk->waitq);
  in cxgbit_process_ddpvld() (csk: argument):
    168  cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
    173                  pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
    178                  pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
  [all …]
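
Lines 103-110 append an skb to the per-connection receive queue under its lock and wake the waiter only when the queue goes from empty to non-empty, so an already-busy reader is not signalled repeatedly. A userspace sketch of that enqueue-and-wake-on-first pattern, with pthreads standing in for the kernel spinlock and wait queue (all names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for the skb receive queue plus wait queue. */
    struct rx_queue {
            pthread_mutex_t lock;             /* plays the role of rxq.lock */
            pthread_cond_t  waitq;            /* plays the role of csk->waitq */
            int             len;              /* stand-in for skb_queue_len() */
    };

    static void rx_enqueue(struct rx_queue *q)
    {
            int was_empty;

            pthread_mutex_lock(&q->lock);
            q->len++;                         /* stand-in for __skb_queue_tail() */
            was_empty = (q->len == 1);
            pthread_mutex_unlock(&q->lock);

            if (was_empty)                    /* wake only on empty -> non-empty */
                    pthread_cond_signal(&q->waitq);
    }

    int main(void)
    {
            struct rx_queue q = {
                    .lock  = PTHREAD_MUTEX_INITIALIZER,
                    .waitq = PTHREAD_COND_INITIALIZER,
            };

            rx_enqueue(&q);                   /* would wake a sleeping reader */
            rx_enqueue(&q);                   /* reader assumed busy: no wake */
            printf("queued %d items\n", q.len);
            return 0;
    }
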
cxgbit.h
  in cxgbit_get_csk() (csk: argument):
    262  static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
    264          kref_get(&csk->kref);
  in cxgbit_put_csk() (csk: argument):
    267  static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
    269          kref_put(&csk->kref, _cxgbit_free_csk);
  in cxgbit_sock_reset_wr_list() (csk: argument):
    282  static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
    284          csk->wr_pending_tail = NULL;
    285          csk->wr_pending_head = NULL;
  in cxgbit_sock_peek_wr() (csk: argument):
    288  static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
    290          return csk->wr_pending_head;
  in cxgbit_sock_enqueue_wr() (csk: argument):
    294  cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
  [all …]
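
cxgbit_get_csk()/cxgbit_put_csk() wrap the kernel's kref: an atomic reference count whose release callback (_cxgbit_free_csk()) runs when the last reference is dropped. A standalone sketch of the same lifetime pattern using C11 atomics; it mirrors kref semantics but is not the kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct cxgbit_sock. */
    struct csk_like {
            atomic_uint refcount;
            int tid;
    };

    static void csk_free(struct csk_like *csk)   /* role of _cxgbit_free_csk() */
    {
            printf("freeing csk tid %d\n", csk->tid);
            free(csk);
    }

    static void csk_get(struct csk_like *csk)    /* like kref_get() */
    {
            atomic_fetch_add(&csk->refcount, 1);
    }

    static void csk_put(struct csk_like *csk)    /* like kref_put() */
    {
            /* The last put drops the count to zero and releases the object. */
            if (atomic_fetch_sub(&csk->refcount, 1) == 1)
                    csk_free(csk);
    }

    int main(void)
    {
            struct csk_like *csk = malloc(sizeof(*csk));

            atomic_init(&csk->refcount, 1);      /* like kref_init() */
            csk->tid = 42;

            csk_get(csk);   /* a second user takes a reference */
            csk_put(csk);   /* ... and drops it */
            csk_put(csk);   /* last reference: csk_free() runs here */
            return 0;
    }
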
cxgbit_ddp.c
  in cxgbit_ppod_write_idata() (csk: argument):
    101  cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
    106          struct cxgbit_device *cdev = csk->com.cdev;
    113          skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
    124          __skb_queue_tail(&csk->ppodq, skb);
  in cxgbit_ddp_set_map() (csk: argument):
    130  cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
    146                  ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
  in cxgbit_ddp_reserve() (csk: argument):
    174  cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
    177          struct cxgbit_device *cdev = csk->com.cdev;
    215          cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
    218          ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
  [all …]
cxgbit_lro.h
     22          struct cxgbit_sock *csk;        (csk: member)
/Linux-v4.19/drivers/scsi/cxgbi/
libcxgbi.c
  in cxgbi_device_portmap_cleanup() (csk: local):
     98          struct cxgbi_sock *csk;
    103                  csk = pmap->port_csk[i];
    107                          csk, cdev);
    108                  spin_lock_bh(&csk->lock);
    109                  cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
    110                  cxgbi_sock_closed(csk);
    111                  spin_unlock_bh(&csk->lock);
    112                  cxgbi_sock_put(csk);
  in find_sock_on_port() (csk: local):
    423          struct cxgbi_sock *csk = pmap->port_csk[i];
    425          if (csk) {
  [all …]
libcxgbi.h
  in cxgbi_sock_set_flag() (csk: argument):
    255  static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
    258          __set_bit(flag, &csk->flags);
    261                  csk, csk->state, csk->flags, flag);
  in cxgbi_sock_clear_flag() (csk: argument):
    264  static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
    267          __clear_bit(flag, &csk->flags);
    270                  csk, csk->state, csk->flags, flag);
  in cxgbi_sock_flag() (csk: argument):
    273  static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
    276          if (csk == NULL)
    278          return test_bit(flag, &csk->flags);
  in cxgbi_sock_set_state() (csk: argument):
    281  static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
  [all …]
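
These helpers keep per-connection state as individual bits in csk->flags via the kernel's __set_bit/__clear_bit/test_bit primitives. A userspace sketch of the same idea with plain shifts and masks; CTPF_OFFLOAD_DOWN appears in the libcxgbi.c hits above, while the second enum value is made up for the example:

    #include <stdbool.h>
    #include <stdio.h>

    /* Only CTPF_OFFLOAD_DOWN comes from the listing; EXAMPLE_FLAG_B is
     * invented here to give the enum a second member. */
    enum csk_flag { CTPF_OFFLOAD_DOWN, EXAMPLE_FLAG_B };

    struct csk_like { unsigned long flags; };

    static void csk_set_flag(struct csk_like *csk, enum csk_flag flag)
    {
            csk->flags |= 1UL << flag;
    }

    static void csk_clear_flag(struct csk_like *csk, enum csk_flag flag)
    {
            csk->flags &= ~(1UL << flag);
    }

    static bool csk_flag(const struct csk_like *csk, enum csk_flag flag)
    {
            if (!csk)           /* cxgbi_sock_flag() also tolerates a NULL csk */
                    return false;
            return csk->flags & (1UL << flag);
    }

    int main(void)
    {
            struct csk_like csk = { 0 };

            csk_set_flag(&csk, CTPF_OFFLOAD_DOWN);
            printf("offload down: %d\n", csk_flag(&csk, CTPF_OFFLOAD_DOWN));
            csk_clear_flag(&csk, CTPF_OFFLOAD_DOWN);
            printf("offload down: %d\n", csk_flag(&csk, CTPF_OFFLOAD_DOWN));
            return 0;
    }
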
/Linux-v4.19/drivers/crypto/chelsio/chtls/
chtls_cm.c
  in chtls_sock_create() (csk: local):
     53          struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
     55          if (!csk)
     58          csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
     59          if (!csk->txdata_skb_cache) {
     60                  kfree(csk);
     64          kref_init(&csk->kref);
     65          csk->cdev = cdev;
     66          skb_queue_head_init(&csk->txq);
     67          csk->wr_skb_head = NULL;
     68          csk->wr_skb_tail = NULL;
  [all …]
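
chtls_sock_create() allocates the chtls_sock, then its cached TX skb, and frees the first allocation if the second one fails, before initialising the kref and queues. A standalone sketch of that allocate-then-roll-back sequence; types and names are illustrative:

    #include <stdlib.h>

    /* Illustrative stand-in for struct chtls_sock. */
    struct csk_like {
            void *txdata_cache;         /* stands in for csk->txdata_skb_cache */
    };

    static struct csk_like *csk_create(size_t cache_len)
    {
            struct csk_like *csk = calloc(1, sizeof(*csk));  /* like kzalloc() */

            if (!csk)
                    return NULL;

            csk->txdata_cache = malloc(cache_len);           /* like alloc_skb() */
            if (!csk->txdata_cache) {
                    free(csk);          /* roll back the first allocation */
                    return NULL;
            }
            return csk;
    }

    int main(void)
    {
            struct csk_like *csk = csk_create(256);

            if (csk) {
                    free(csk->txdata_cache);
                    free(csk);
            }
            return 0;
    }
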
chtls_hw.c
  in __set_tcb_field_direct() (csk: argument):
     26  static void __set_tcb_field_direct(struct chtls_sock *csk,
     32          INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
     33          req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
     35                          QUEUENO_V(csk->rss_qid));
  in __set_tcb_field() (csk: local):
     48          struct chtls_sock *csk;
     53          csk = rcu_dereference_sk_user_data(sk);
     56          __set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
     57          set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
  in chtls_set_tcb_field() (csk: local):
     68          struct chtls_sock *csk;
     81          csk = rcu_dereference_sk_user_data(sk);
  [all …]
chtls_io.c
  in is_tls_tx() (csk: argument):
     28  static bool is_tls_tx(struct chtls_sock *csk)
     30          return csk->tlshws.txkey >= 0;
  in is_tls_rx() (csk: argument):
     33  static bool is_tls_rx(struct chtls_sock *csk)
     35          return csk->tlshws.rxkey >= 0;
  in nos_ivs() (csk: local):
     48          struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
     50          return DIV_ROUND_UP(size, csk->tlshws.mfs);
  in create_flowc_wr_skb() (csk: local):
     96          struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
    104          skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
  in send_flowc_wr() (csk: local):
    112          struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
    130          ret = cxgb4_immdata_send(csk->egress_dev,
  [all …]
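
nos_ivs() at line 50 counts the IVs needed for a payload by dividing its size by the TLS maximum fragment size and rounding up. A short worked example of that arithmetic with an equivalent DIV_ROUND_UP definition; the 16 KiB fragment size is a typical value, not one taken from the listing:

    #include <stdio.h>

    /* Equivalent of the kernel's DIV_ROUND_UP() macro. */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int mfs = 16384;   /* assumed TLS max fragment size, 16 KiB */
            unsigned int sizes[] = { 1000, 16384, 16385, 50000 };

            for (unsigned int i = 0; i < 4; i++)
                    printf("size=%5u -> records/IVs=%u\n",
                           sizes[i], DIV_ROUND_UP(sizes[i], mfs));
            return 0;
    }
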
chtls.h
  in csk_set_flag() (csk: argument):
    366  static inline void csk_set_flag(struct chtls_sock *csk,
    369          __set_bit(flag, &csk->flags);
  in csk_reset_flag() (csk: argument):
    372  static inline void csk_reset_flag(struct chtls_sock *csk,
    375          __clear_bit(flag, &csk->flags);
  in csk_conn_inline() (csk: argument):
    378  static inline bool csk_conn_inline(const struct chtls_sock *csk)
    380          return test_bit(CSK_CONN_INLINE, &csk->flags);
  in csk_flag() (csk: local):
    385          struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
    387          if (!csk_conn_inline(csk))
    389          return test_bit(flag, &csk->flags);
  in csk_flag_nochk() (csk: argument):
    392  static inline int csk_flag_nochk(const struct chtls_sock *csk,
  [all …]
chtls_cm.h
  in enqueue_wr() (csk: argument):
    191  static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
    197          if (!csk->wr_skb_head)
    198                  csk->wr_skb_head = skb;
    200                  WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
    201          csk->wr_skb_tail = skb;
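
enqueue_wr() keeps unacknowledged work requests as a singly linked list threaded through each skb's control block (WR_SKB_CB()->next_wr), with head and tail pointers on the socket. A standalone sketch of that head/tail queue; the types are illustrative stand-ins:

    #include <stdio.h>

    /* Illustrative stand-ins; the driver threads the link through the
     * skb control block instead of a dedicated struct. */
    struct wr {
            int id;
            struct wr *next_wr;
    };

    struct csk_like {
            struct wr *wr_head, *wr_tail;
    };

    static void enqueue_wr(struct csk_like *csk, struct wr *w)
    {
            w->next_wr = NULL;
            if (!csk->wr_head)              /* first pending WR */
                    csk->wr_head = w;
            else
                    csk->wr_tail->next_wr = w;
            csk->wr_tail = w;
    }

    int main(void)
    {
            struct csk_like csk = { 0 };
            struct wr a = { .id = 1 }, b = { .id = 2 };

            enqueue_wr(&csk, &a);
            enqueue_wr(&csk, &b);

            for (struct wr *w = csk.wr_head; w; w = w->next_wr)
                    printf("pending wr %d\n", w->id);
            return 0;
    }
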
chtls_main.c
  in do_chtls_setsockopt() (csk: local):
    470          struct chtls_sock *csk;
    474          csk = rcu_dereference_sk_user_data(sk);
    493          crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
    511          rc = chtls_setkey(csk, keylen, optname);
/Linux-v4.19/drivers/net/ethernet/broadcom/
cnic.c
  in csk_hold() (csk: argument):
    152  static inline void csk_hold(struct cnic_sock *csk)
    154          atomic_inc(&csk->ref_count);
  in csk_put() (csk: argument):
    157  static inline void csk_put(struct cnic_sock *csk)
    159          atomic_dec(&csk->ref_count);
  in cnic_in_use() (csk: argument):
    285  static int cnic_in_use(struct cnic_sock *csk)
    287          return test_bit(SK_F_INUSE, &csk->flags);
  in cnic_send_nlmsg() (csk: argument):
    319                          struct cnic_sock *csk)
    332          if (csk) {
    338                  path_req.handle = (u64) csk->l5_cid;
    339                  if (test_bit(SK_F_IPV6, &csk->flags)) {
  [all …]
/Linux-v4.19/net/kcm/
kcmsock.c
  in report_csk_error() (csk: argument):
     50  static void report_csk_error(struct sock *csk, int err)
     52          csk->sk_err = EPIPE;
     53          csk->sk_error_report(csk);
  in kcm_abort_tx_psock() (csk: local):
     59          struct sock *csk = psock->sk;
     91          report_csk_error(csk, err);
  in kcm_attach() (csk: local):
   1375          struct sock *csk;
   1386          csk = csock->sk;
   1387          if (!csk)
   1390          lock_sock(csk);
   1393          if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
  [all …]
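
The visible condition at line 1393 rejects attach attempts for sockets that are not IPv4/IPv6 (the full KCM check also requires TCP). From user space the closest analogue is querying SO_DOMAIN and SO_PROTOCOL before handing a socket to KCM; the sketch below is that analogy, not the kernel code path:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <unistd.h>

    /* Returns 1 if the socket is IPv4/IPv6 TCP, mirroring the family and
     * protocol constraints kcm_attach() enforces in the kernel. */
    static int is_inet_tcp(int fd)
    {
            int domain, proto;
            socklen_t len = sizeof(int);

            if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) < 0)
                    return 0;
            len = sizeof(int);
            if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) < 0)
                    return 0;
            return (domain == AF_INET || domain == AF_INET6) &&
                   proto == IPPROTO_TCP;
    }

    int main(void)
    {
            int tcp = socket(AF_INET, SOCK_STREAM, 0);
            int udp = socket(AF_INET, SOCK_DGRAM, 0);

            printf("tcp attachable: %d\n", is_inet_tcp(tcp));
            printf("udp attachable: %d\n", is_inet_tcp(udp));
            close(tcp);
            close(udp);
            return 0;
    }
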
/Linux-v4.19/drivers/isdn/mISDN/
socket.c
  in data_sock_bind() (csk: local):
    479          struct sock *csk;
    503          sk_for_each(csk, &data_sockets.head) {
    504                  if (sk == csk)
    506                  if (_pms(csk)->dev != _pms(sk)->dev)
    508                  if (csk->sk_protocol >= ISDN_P_B_START)
    510                  if (IS_ISDN_P_TE(csk->sk_protocol)
/Linux-v4.19/drivers/scsi/bnx2i/
bnx2i_iscsi.c
  in bnx2i_host_get_param() (csk: local):
   1576          struct cnic_sock *csk;
   1581          csk = bnx2i_ep->cm_sk;
   1582          if (test_bit(SK_F_IPV6, &csk->flags))
   1583                  len = sprintf(buf, "%pI6\n", csk->src_ip);
   1585                  len = sprintf(buf, "%pI4\n", csk->src_ip);
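
Lines 1582-1585 format the connection's source address as IPv6 or IPv4 depending on the SK_F_IPV6 flag, via the kernel's %pI6/%pI4 printk extensions. A userspace equivalent of that flag-driven formatting using inet_ntop(); the struct layout here is illustrative:

    #include <arpa/inet.h>
    #include <stdio.h>

    /* Illustrative stand-in: the socket stores the raw source address and
     * a flag saying whether the connection is IPv6. */
    struct csk_like {
            int is_ipv6;
            unsigned char src_ip[16];         /* only 4 bytes used for IPv4 */
    };

    static void print_src_ip(const struct csk_like *csk)
    {
            char buf[INET6_ADDRSTRLEN];

            if (csk->is_ipv6)
                    inet_ntop(AF_INET6, csk->src_ip, buf, sizeof(buf));
            else
                    inet_ntop(AF_INET, csk->src_ip, buf, sizeof(buf));
            printf("%s\n", buf);
    }

    int main(void)
    {
            struct csk_like v4 = { .is_ipv6 = 0 };
            struct csk_like v6 = { .is_ipv6 = 1 };

            inet_pton(AF_INET, "192.0.2.1", v4.src_ip);
            inet_pton(AF_INET6, "2001:db8::1", v6.src_ip);

            print_src_ip(&v4);
            print_src_ip(&v6);
            return 0;
    }
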
/Linux-v4.19/include/net/
sock.h
  in sock_allow_reclassification() (csk: argument):
   1553  static inline bool sock_allow_reclassification(const struct sock *csk)
   1555          struct sock *sk = (struct sock *)csk;