Lines matching refs: cp
65 void rds_send_path_reset(struct rds_conn_path *cp) in rds_send_path_reset() argument
70 if (cp->cp_xmit_rm) { in rds_send_path_reset()
71 rm = cp->cp_xmit_rm; in rds_send_path_reset()
72 cp->cp_xmit_rm = NULL; in rds_send_path_reset()
81 cp->cp_xmit_sg = 0; in rds_send_path_reset()
82 cp->cp_xmit_hdr_off = 0; in rds_send_path_reset()
83 cp->cp_xmit_data_off = 0; in rds_send_path_reset()
84 cp->cp_xmit_atomic_sent = 0; in rds_send_path_reset()
85 cp->cp_xmit_rdma_sent = 0; in rds_send_path_reset()
86 cp->cp_xmit_data_sent = 0; in rds_send_path_reset()
88 cp->cp_conn->c_map_queued = 0; in rds_send_path_reset()
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_path_reset()
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_path_reset()
94 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_reset()
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_reset()
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); in rds_send_path_reset()
100 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_reset()
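The rds_send_path_reset() matches above cover two things: clearing the partial-transmit cursor (the cp_xmit_* fields) and splicing cp_retrans back onto the head of cp_send_queue under cp_lock. Below is a minimal userspace sketch of that queue handling only, with a plain singly linked list standing in for the kernel's list_head and the cp_lock serialization omitted; all names are illustrative stand-ins, not the kernel's.

#include <stddef.h>

struct msg {
        struct msg *next;
        /* payload elided */
};

struct path {
        struct msg *send_queue;   /* head is the next message to send */
        struct msg *retrans;      /* sent but not yet acknowledged */
        size_t xmit_hdr_off;      /* partial-transmit cursor */
        size_t xmit_data_off;
        size_t xmit_sg;
};

static void path_reset(struct path *p)
{
        /* Forget any half-sent message so the next transmit starts clean. */
        p->xmit_hdr_off = 0;
        p->xmit_data_off = 0;
        p->xmit_sg = 0;

        /* Put unacked messages back at the front of the send queue so they
         * go out again before anything queued later. */
        if (p->retrans) {
                struct msg *tail = p->retrans;
                while (tail->next)
                        tail = tail->next;
                tail->next = p->send_queue;
                p->send_queue = p->retrans;
                p->retrans = NULL;
        }
}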
104 static int acquire_in_xmit(struct rds_conn_path *cp) in acquire_in_xmit() argument
106 return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; in acquire_in_xmit()
109 static void release_in_xmit(struct rds_conn_path *cp) in release_in_xmit() argument
111 clear_bit(RDS_IN_XMIT, &cp->cp_flags); in release_in_xmit()
119 if (waitqueue_active(&cp->cp_waitq)) in release_in_xmit()
120 wake_up_all(&cp->cp_waitq); in release_in_xmit()
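acquire_in_xmit() and release_in_xmit() form a try-lock around the transmit path: one atomic bit claims it, and clearing the bit is followed by a wakeup for any sender that found it busy. A hedged userspace sketch of the same pattern follows, using C11 atomics and a pthread condition variable in place of the kernel's bit operations and waitqueue; the names below are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

#define IN_XMIT 0x1u                  /* plays the role of RDS_IN_XMIT */

struct xmit_path {
        atomic_uint flags;
        pthread_mutex_t wait_lock;
        pthread_cond_t waitq;         /* stands in for cp->cp_waitq */
};

/* Returns true only for the caller that flipped the bit from 0 to 1,
 * i.e. the one that now owns the transmit path. */
static bool acquire_xmit(struct xmit_path *p)
{
        return (atomic_fetch_or(&p->flags, IN_XMIT) & IN_XMIT) == 0;
}

static void release_xmit(struct xmit_path *p)
{
        atomic_fetch_and(&p->flags, ~IN_XMIT);
        /* Wake anyone who lost the race and is waiting to transmit. */
        pthread_mutex_lock(&p->wait_lock);
        pthread_cond_broadcast(&p->waitq);
        pthread_mutex_unlock(&p->wait_lock);
}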
137 int rds_send_xmit(struct rds_conn_path *cp) in rds_send_xmit() argument
139 struct rds_connection *conn = cp->cp_conn; in rds_send_xmit()
159 if (!acquire_in_xmit(cp)) { in rds_send_xmit()
165 if (rds_destroy_pending(cp->cp_conn)) { in rds_send_xmit()
166 release_in_xmit(cp); in rds_send_xmit()
179 send_gen = READ_ONCE(cp->cp_send_gen) + 1; in rds_send_xmit()
180 WRITE_ONCE(cp->cp_send_gen, send_gen); in rds_send_xmit()
186 if (!rds_conn_path_up(cp)) { in rds_send_xmit()
187 release_in_xmit(cp); in rds_send_xmit()
193 conn->c_trans->xmit_path_prepare(cp); in rds_send_xmit()
201 rm = cp->cp_xmit_rm; in rds_send_xmit()
214 rm->m_inc.i_conn_path = cp; in rds_send_xmit()
215 rm->m_inc.i_conn = cp->cp_conn; in rds_send_xmit()
217 cp->cp_xmit_rm = rm; in rds_send_xmit()
240 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
242 if (!list_empty(&cp->cp_send_queue)) { in rds_send_xmit()
243 rm = list_entry(cp->cp_send_queue.next, in rds_send_xmit()
253 &cp->cp_retrans); in rds_send_xmit()
256 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
271 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
274 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
280 if (cp->cp_unacked_packets == 0 || in rds_send_xmit()
281 cp->cp_unacked_bytes < len) { in rds_send_xmit()
284 cp->cp_unacked_packets = in rds_send_xmit()
286 cp->cp_unacked_bytes = in rds_send_xmit()
290 cp->cp_unacked_bytes -= len; in rds_send_xmit()
291 cp->cp_unacked_packets--; in rds_send_xmit()
294 cp->cp_xmit_rm = rm; in rds_send_xmit()
298 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
310 cp->cp_xmit_rdma_sent = 1; in rds_send_xmit()
314 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { in rds_send_xmit()
326 cp->cp_xmit_atomic_sent = 1; in rds_send_xmit()
352 if (rm->data.op_active && !cp->cp_xmit_data_sent) { in rds_send_xmit()
356 cp->cp_xmit_hdr_off, in rds_send_xmit()
357 cp->cp_xmit_sg, in rds_send_xmit()
358 cp->cp_xmit_data_off); in rds_send_xmit()
362 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
365 cp->cp_xmit_hdr_off); in rds_send_xmit()
366 cp->cp_xmit_hdr_off += tmp; in rds_send_xmit()
370 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
373 cp->cp_xmit_data_off); in rds_send_xmit()
374 cp->cp_xmit_data_off += tmp; in rds_send_xmit()
376 if (cp->cp_xmit_data_off == sg->length) { in rds_send_xmit()
377 cp->cp_xmit_data_off = 0; in rds_send_xmit()
379 cp->cp_xmit_sg++; in rds_send_xmit()
380 BUG_ON(ret != 0 && cp->cp_xmit_sg == in rds_send_xmit()
385 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
386 (cp->cp_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
387 cp->cp_xmit_data_sent = 1; in rds_send_xmit()
395 if (!rm->data.op_active || cp->cp_xmit_data_sent) { in rds_send_xmit()
396 cp->cp_xmit_rm = NULL; in rds_send_xmit()
397 cp->cp_xmit_sg = 0; in rds_send_xmit()
398 cp->cp_xmit_hdr_off = 0; in rds_send_xmit()
399 cp->cp_xmit_data_off = 0; in rds_send_xmit()
400 cp->cp_xmit_rdma_sent = 0; in rds_send_xmit()
401 cp->cp_xmit_atomic_sent = 0; in rds_send_xmit()
402 cp->cp_xmit_data_sent = 0; in rds_send_xmit()
410 conn->c_trans->xmit_path_complete(cp); in rds_send_xmit()
411 release_in_xmit(cp); in rds_send_xmit()
440 raced = send_gen != READ_ONCE(cp->cp_send_gen); in rds_send_xmit()
443 !list_empty(&cp->cp_send_queue)) && !raced) { in rds_send_xmit()
447 if (rds_destroy_pending(cp->cp_conn)) in rds_send_xmit()
450 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_xmit()
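The tail end of rds_send_xmit() shown above uses cp_send_gen to decide whether to reschedule itself: the generation is bumped while RDS_IN_XMIT is held, and after releasing it the sender only queues more work if no newer sender has bumped the generation again and the send queue is still non-empty. Here is a compilable sketch of that check, with the kernel's READ_ONCE/WRITE_ONCE replaced by C11 atomics and the work queue replaced by a stub; all names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>

struct send_path {
        atomic_ulong send_gen;        /* role of cp->cp_send_gen */
        atomic_bool queue_nonempty;   /* role of !list_empty(&cp->cp_send_queue) */
};

/* Placeholder for queue_delayed_work(rds_wq, &cp->cp_send_w, 1). */
static void reschedule_send(struct send_path *p) { (void)p; }

static void xmit_once(struct send_path *p)
{
        /* Claim a generation number while we own the transmit slot; any
         * later sender that takes over will bump it past ours. */
        unsigned long my_gen = atomic_load(&p->send_gen) + 1;
        atomic_store(&p->send_gen, my_gen);

        /* ... drain messages from the send queue here ... */

        /* After giving up the slot: if work remains but a newer sender has
         * started, let it do the rescheduling instead of us. */
        bool raced = atomic_load(&p->send_gen) != my_gen;
        if (atomic_load(&p->queue_nonempty) && !raced)
                reschedule_send(p);
}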
676 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, in rds_send_path_drop_acked() argument
683 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_drop_acked()
685 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_drop_acked()
697 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_drop_acked()
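rds_send_path_drop_acked() walks cp_retrans under cp_lock and drops every message covered by the peer's cumulative ack. The following standalone sketch shows that pass with a singly linked list and a uint64_t sequence number standing in for list_head and the RDS header; locking and completion notification are omitted, and the names are illustrative.

#include <stdint.h>
#include <stddef.h>

struct sent_msg {
        struct sent_msg *next;
        uint64_t seq;                 /* sequence stamped when queued */
};

/* Unlink everything with seq <= ack and hand it back for freeing. */
static struct sent_msg *drop_acked(struct sent_msg **retrans, uint64_t ack)
{
        struct sent_msg *done = NULL;
        struct sent_msg **pp = retrans;

        while (*pp) {
                struct sent_msg *m = *pp;
                if (m->seq <= ack) {
                        *pp = m->next;    /* unlink from the retrans list */
                        m->next = done;   /* collect on the done list */
                        done = m;
                } else {
                        pp = &m->next;
                }
        }
        return done;
}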
716 struct rds_conn_path *cp; in rds_send_drop_to() local
747 cp = rm->m_inc.i_conn_path; in rds_send_drop_to()
749 cp = &conn->c_path[0]; in rds_send_drop_to()
751 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_drop_to()
758 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
762 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
809 struct rds_conn_path *cp, in rds_send_queue_rm() argument
854 rm->m_inc.i_conn_path = cp; in rds_send_queue_rm()
857 spin_lock(&cp->cp_lock); in rds_send_queue_rm()
858 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); in rds_send_queue_rm()
859 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_queue_rm()
861 spin_unlock(&cp->cp_lock); in rds_send_queue_rm()
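rds_send_queue_rm() stamps each message with cp_next_tx_seq and appends it to cp_send_queue while holding cp_lock, so sequence order and queue order always agree. A minimal sketch of just that step, with a pthread mutex in place of cp_lock and the congestion and accounting checks left out (names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct queued_msg {
        struct queued_msg *next;
        uint64_t seq;
};

struct tx_path {
        pthread_mutex_t lock;            /* role of cp->cp_lock */
        uint64_t next_tx_seq;            /* role of cp->cp_next_tx_seq */
        struct queued_msg *head, *tail;  /* role of cp->cp_send_queue */
};

static void queue_msg(struct tx_path *p, struct queued_msg *m)
{
        pthread_mutex_lock(&p->lock);
        m->seq = p->next_tx_seq++;       /* stamp before it can be sent */
        m->next = NULL;
        if (p->tail)
                p->tail->next = m;
        else
                p->head = m;
        p->tail = m;
        pthread_mutex_unlock(&p->lock);
}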
1374 rds_send_probe(struct rds_conn_path *cp, __be16 sport, in rds_send_probe() argument
1387 rm->m_daddr = cp->cp_conn->c_faddr; in rds_send_probe()
1390 rds_conn_path_connect_if_down(cp); in rds_send_probe()
1392 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); in rds_send_probe()
1396 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_probe()
1397 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_probe()
1400 rm->m_inc.i_conn = cp->cp_conn; in rds_send_probe()
1401 rm->m_inc.i_conn_path = cp; in rds_send_probe()
1404 cp->cp_next_tx_seq); in rds_send_probe()
1406 cp->cp_next_tx_seq++; in rds_send_probe()
1409 cp->cp_conn->c_trans->t_mp_capable) { in rds_send_probe()
1411 u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); in rds_send_probe()
1421 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_probe()
1428 if (!rds_destroy_pending(cp->cp_conn)) in rds_send_probe()
1429 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_probe()
1442 rds_send_pong(struct rds_conn_path *cp, __be16 dport) in rds_send_pong() argument
1444 return rds_send_probe(cp, 0, dport, 0); in rds_send_pong()
1451 struct rds_conn_path *cp = &conn->c_path[cp_index]; in rds_send_ping() local
1453 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_ping()
1455 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1459 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1460 rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0); in rds_send_ping()