/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

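/* Possible outcomes of drbd_may_finish_epoch() below: the epoch may stay
 * around unchanged (FE_STILL_LIVE), be unlinked and freed (FE_DESTROYED),
 * or, if it is the current epoch, be reset in place for reuse (FE_RECYCLED). */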
enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


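/* Opportunistic allocation flags: highmem pages are fine, don't warn on
 * failure, and, since no __GFP_RECLAIM bits are set, never block or trigger
 * write-out (see the comment in __drbd_alloc_pages() below). */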
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
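/* For illustration: a chain of three pages looks like
 *
 *	*head -> [page0]->private -> [page1]->private -> [page2]->private == 0
 *
 * where page_chain_next() (from drbd_int.h) reads page->private as the
 * pointer to the next page, and a zero ->private terminates the chain. */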

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
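	/* Sanity check, can be compiled out: verify that @chain_last really
	 * is the tail of the chain starting at @chain_first. */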
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD peer device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

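		/* Wait up to 100ms for pages to be returned; if the full
		 * timeout elapses without a wake-up on drbd_pp_wait, stop
		 * honoring the max-buffers soft limit, so a mis-configured
		 * limit cannot wedge us here forever (see comment above). */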
		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

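	/* Only keep pages in our own pool up to a high-water mark of one
	 * maximum-sized bio worth of pages per configured minor; beyond
	 * that, give them back to the system. */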
	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, &drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
		       int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, &drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);
};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter: +/- timeo/7 */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

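	/* Non-blocking MSG_PEEK probe: a positive return or -EAGAIN means the
	 * connection is still alive (data pending, or simply idle); a return
	 * of 0 means the peer shut the socket down, and any other error
	 * means it is broken. */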
	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
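/* DRBD uses two TCP connections per peer: a "data" socket for the actual
 * payload, and a "meta" socket for acknowledgements and the like.  Each node
 * both connects and listens; an outgoing connection carries P_INITIAL_DATA
 * on the first socket and P_INITIAL_META on the second, and crossed
 * connection attempts are resolved in the loop below. */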
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h;
	bool discard_my_data, ok;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (connection_established(connection, &sock.socket, &msock.socket))
			break;

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
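				/* Crossed or bogus initial packets: flip a
				 * coin to decide whether to wait for another
				 * incoming connection, so that both nodes do
				 * not keep making the same decision forever. */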
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = connection_established(connection, &sock.socket, &msock.socket);
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	/* Prevent a race between resync-handshake and
	 * being promoted to Primary.
	 *
	 * Grab and release the state mutex, so we know that any current
	 * drbd_set_role() is finished, and any incoming drbd_set_role
	 * will see the STATE_SENT flag, and wait for it to be cleared.
	 */
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_lock(peer_device->device->state_mutex);

	/* avoid a race with conn_request_state( C_DISCONNECTING ) */
	spin_lock_irq(&connection->resource->req_lock);
	set_bit(STATE_SENT, &connection->flags);
	spin_unlock_irq(&connection->resource->req_lock);

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_unlock(peer_device->device->state_mutex);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		kref_get(&device->kref);
		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->ack_receiver);
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use format string arguments */
	connection->ack_sender =
		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
	if (!connection->ack_sender) {
		drbd_err(connection, "Failed to create workqueue ack_sender\n");
		return 0;
	}

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

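/* Three on-the-wire header formats are in use, depending on the agreed
 * protocol version (see drbd_protocol.h): the 8 byte p_header80 (u32 magic,
 * u16 command, u16 length), the 8 byte p_header95 (u16 magic, u16 command,
 * u32 length), and the 16 byte p_header100, which adds a volume number and
 * padding.  The magic value doubles as the format discriminator. */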
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
	if (current->plug == &connection->receiver_plug) {
		blk_finish_plug(&connection->receiver_plug);
		blk_start_plug(&connection->receiver_plug);
	} /* else: maybe just schedule() ?? */
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	unsigned int size = drbd_header_size(connection);
	int err;

	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
	if (err != size) {
		/* If we have nothing in the receive buffer now, to reduce
		 * application latency, try to drain the backend queues as
		 * quickly as possible, and let remote TCP know what we have
		 * received so far. */
		if (err == -EAGAIN) {
			drbd_tcp_quickack(connection->data.socket);
			drbd_unplug_all_devices(connection);
		}
		if (err > 0) {
			buffer += err;
			size -= err;
		}
		err = drbd_recv_all_warn(connection, buffer, size);
		if (err)
			return err;
	}

	err = decode_header(connection, connection->data.rbuf, pi);
	connection->last_received = jiffies;

	return err;
}
/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
	if (!bio || !octx) {
		drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		kfree(octx);
		if (bio)
			bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

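		/* Note: ctx.pending starts at 1 as a bias for ourselves; it
		 * is dropped again below, after all volumes have been
		 * submitted, so ctx.done cannot complete prematurely. */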
		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @resource:	DRBD resource.
 * @bdev:	Backing device to consider, may be NULL.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo)
{
	struct drbd_device *device;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_NONE] = "none",
		[WO_DRAIN_IO] = "drain",
		[WO_BDEV_FLUSH] = "flush",
	};

	pwo = resource->write_ordering;
	if (wo != WO_BDEV_FLUSH)
		wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, vnr) {
		if (get_ldev(device)) {
			wo = max_allowed_wo(device->ldev, wo);
			if (device->ldev == bdev)
				bdev = NULL;
			put_ldev(device);
		}
	}

	if (bdev)
		wo = max_allowed_wo(bdev, wo);

	rcu_read_unlock();

	resource->write_ordering = wo;
	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}

static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
	struct block_device *bdev = device->ldev->backing_bdev;

	if (blkdev_issue_zeroout(bdev, peer_req->i.sector, peer_req->i.size >> 9,
			GFP_NOIO, 0))
		peer_req->flags |= EE_WAS_ERROR;

	drbd_endio_write_sec_final(peer_req);
}

static void drbd_issue_peer_wsame(struct drbd_device *device,
				  struct drbd_peer_request *peer_req)
{
	struct block_device *bdev = device->ldev->backing_bdev;
	sector_t s = peer_req->i.sector;
	sector_t nr = peer_req->i.size >> 9;
	if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
		peer_req->flags |= EE_WAS_ERROR;
	drbd_endio_write_sec_final(peer_req);
}


/**
 * drbd_submit_peer_request() - Submit a peer request to the local backing device
 * @device:	DRBD device.
 * @peer_req:	peer request
 * @op:		REQ_OP_* operation to perform
 * @op_flags:	additional bio operation flags (REQ_*)
 * @fault_type:	fault injection category, see drbd_insert_fault()
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
			     struct drbd_peer_request *peer_req,
			     const unsigned op, const unsigned op_flags,
			     const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned data_size = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* TRIM/DISCARD: for now, always use the helper function
	 * blkdev_issue_zeroout(..., discard=true).
	 * It's synchronous, but it does the right thing wrt. bio splitting.
	 * Correctness first, performance later.  Next step is to code an
	 * asynchronous variant of the same.
	 */
	if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
		/* wait for all pending IO completions, before we start
		 * zeroing things out. */
		conn_wait_active_ee_empty(peer_req->peer_device->connection);
		/* add it to the active list now,
		 * so we can find it to present it in debugfs */
		peer_req->submit_jif = jiffies;
		peer_req->flags |= EE_SUBMITTED;

		/* If this was a resync request from receive_rs_deallocated(),
		 * it is already on the sync_ee list */
		if (list_empty(&peer_req->w.list)) {
			spin_lock_irq(&device->resource->req_lock);
			list_add_tail(&peer_req->w.list, &device->active_ee);
			spin_unlock_irq(&device->resource->req_lock);
		}

		if (peer_req->flags & EE_IS_TRIM)
			drbd_issue_peer_discard(device, peer_req);
		else /* EE_WRITE_SAME */
			drbd_issue_peer_wsame(device, peer_req);
		return 0;
	}

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio_set_op_attrs(bio, op, op_flags);
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

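	/* Fill this bio with as many pages as it will take; if bio_add_page()
	 * refuses a page (bio full, or lower-level restrictions), open
	 * another bio via next_bio and continue from the same page. */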
1602 	page_chain_for_each(page) {
1603 		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
1604 		if (!bio_add_page(bio, page, len, 0))
1605 			goto next_bio;
1606 		data_size -= len;
1607 		sector += len >> 9;
1608 		--nr_pages;
1609 	}
1610 	D_ASSERT(device, data_size == 0);
1611 	D_ASSERT(device, page == NULL);
1612 
1613 	atomic_set(&peer_req->pending_bios, n_bios);
1614 	/* for debugfs: update timestamp, mark as submitted */
1615 	peer_req->submit_jif = jiffies;
1616 	peer_req->flags |= EE_SUBMITTED;
1617 	do {
1618 		bio = bios;
1619 		bios = bios->bi_next;
1620 		bio->bi_next = NULL;
1621 
1622 		drbd_generic_make_request(device, fault_type, bio);
1623 	} while (bios);
1624 	return 0;
1625 
1626 fail:
1627 	while (bios) {
1628 		bio = bios;
1629 		bios = bios->bi_next;
1630 		bio_put(bio);
1631 	}
1632 	return err;
1633 }
1634 
drbd_remove_epoch_entry_interval(struct drbd_device * device,struct drbd_peer_request * peer_req)1635 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1636 					     struct drbd_peer_request *peer_req)
1637 {
1638 	struct drbd_interval *i = &peer_req->i;
1639 
1640 	drbd_remove_interval(&device->write_requests, i);
1641 	drbd_clear_interval(i);
1642 
1643 	/* Wake up any processes waiting for this peer request to complete.  */
1644 	if (i->waiting)
1645 		wake_up(&device->misc_wait);
1646 }
1647 
conn_wait_active_ee_empty(struct drbd_connection * connection)1648 static void conn_wait_active_ee_empty(struct drbd_connection *connection)
1649 {
1650 	struct drbd_peer_device *peer_device;
1651 	int vnr;
1652 
1653 	rcu_read_lock();
1654 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1655 		struct drbd_device *device = peer_device->device;
1656 
1657 		kref_get(&device->kref);
1658 		rcu_read_unlock();
1659 		drbd_wait_ee_list_empty(device, &device->active_ee);
1660 		kref_put(&device->kref, drbd_destroy_device);
1661 		rcu_read_lock();
1662 	}
1663 	rcu_read_unlock();
1664 }
1665 
receive_Barrier(struct drbd_connection * connection,struct packet_info * pi)1666 static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
1667 {
1668 	int rv;
1669 	struct p_barrier *p = pi->data;
1670 	struct drbd_epoch *epoch;
1671 
1672 	/* FIXME these are unacked on connection,
1673 	 * not a specific (peer)device.
1674 	 */
1675 	connection->current_epoch->barrier_nr = p->barrier;
1676 	connection->current_epoch->connection = connection;
1677 	rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
1678 
1679 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1680 	 * the activity log, which means it would not be resynced in case the
1681 	 * R_PRIMARY crashes now.
1682 	 * Therefore we must send the barrier_ack after the barrier request was
1683 	 * completed. */
1684 	switch (connection->resource->write_ordering) {
1685 	case WO_NONE:
1686 		if (rv == FE_RECYCLED)
1687 			return 0;
1688 
1689 		/* receiver context, in the writeout path of the other node.
1690 		 * avoid potential distributed deadlock */
1691 		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1692 		if (epoch)
1693 			break;
1694 		else
1695 			drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
1696 			/* Fall through */
1697 
1698 	case WO_BDEV_FLUSH:
1699 	case WO_DRAIN_IO:
1700 		conn_wait_active_ee_empty(connection);
1701 		drbd_flush(connection);
1702 
1703 		if (atomic_read(&connection->current_epoch->epoch_size)) {
1704 			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1705 			if (epoch)
1706 				break;
1707 		}
1708 
1709 		return 0;
1710 	default:
1711 		drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
1712 			 connection->resource->write_ordering);
1713 		return -EIO;
1714 	}
1715 
1716 	epoch->flags = 0;
1717 	atomic_set(&epoch->epoch_size, 0);
1718 	atomic_set(&epoch->active, 0);
1719 
1720 	spin_lock(&connection->epoch_lock);
1721 	if (atomic_read(&connection->current_epoch->epoch_size)) {
1722 		list_add(&epoch->list, &connection->current_epoch->list);
1723 		connection->current_epoch = epoch;
1724 		connection->epochs++;
1725 	} else {
1726 		/* The current_epoch got recycled while we allocated this one... */
1727 		kfree(epoch);
1728 	}
1729 	spin_unlock(&connection->epoch_lock);
1730 
1731 	return 0;
1732 }
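
/*
 * For illustration: with write-ordering "flush" (WO_BDEV_FLUSH), a
 * P_BARRIER makes the receiver wait until active_ee is empty and then
 * issue a backing-device flush, so the P_BARRIER_ACK sent later really
 * guarantees that the epoch's writes are stable on disk.
 */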
1733 
1734 /* quick wrapper in case payload size != request_size (write same) */
1735 static void drbd_csum_ee_size(struct crypto_ahash *h,
1736 			      struct drbd_peer_request *r, void *d,
1737 			      unsigned int payload_size)
1738 {
1739 	unsigned int tmp = r->i.size;
1740 	r->i.size = payload_size;
1741 	drbd_csum_ee(h, r, d);
1742 	r->i.size = tmp;
1743 }
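
/*
 * For illustration: for a write-same request, the payload on the wire
 * is a single logical block (e.g. 512 bytes) while i.size spans the
 * whole affected range; the temporary swap above makes drbd_csum_ee()
 * hash only the payload bytes that were actually transmitted.
 */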
1744 
1745 /* used from receive_RSDataReply (recv_resync_read)
1746  * and from receive_Data.
1747  * data_size: actual payload ("data in")
1748  * 	for normal writes that is bi_size.
1749  * 	for discards, that is zero.
1750  * 	for write same, it is logical_block_size.
1751  * both trim and write same have the bi_size ("data len to be affected")
1752  * as extra argument in the packet header.
1753  */
1754 static struct drbd_peer_request *
1755 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1756 	      struct packet_info *pi) __must_hold(local)
1757 {
1758 	struct drbd_device *device = peer_device->device;
1759 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
1760 	struct drbd_peer_request *peer_req;
1761 	struct page *page;
1762 	int digest_size, err;
1763 	unsigned int data_size = pi->size, ds;
1764 	void *dig_in = peer_device->connection->int_dig_in;
1765 	void *dig_vv = peer_device->connection->int_dig_vv;
1766 	unsigned long *data;
1767 	struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
1768 	struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
1769 
1770 	digest_size = 0;
1771 	if (!trim && peer_device->connection->peer_integrity_tfm) {
1772 		digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
1773 		/*
1774 		 * FIXME: Receive the incoming digest into the receive buffer
1775 		 *	  here, together with its struct p_data?
1776 		 */
1777 		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1778 		if (err)
1779 			return NULL;
1780 		data_size -= digest_size;
1781 	}
1782 
1783 	/* assume request_size == data_size, but special case trim and wsame. */
1784 	ds = data_size;
1785 	if (trim) {
1786 		if (!expect(data_size == 0))
1787 			return NULL;
1788 		ds = be32_to_cpu(trim->size);
1789 	} else if (wsame) {
1790 		if (data_size != queue_logical_block_size(device->rq_queue)) {
1791 			drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
1792 				data_size, queue_logical_block_size(device->rq_queue));
1793 			return NULL;
1794 		}
1795 		if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
1796 			drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
1797 				data_size, bdev_logical_block_size(device->ldev->backing_bdev));
1798 			return NULL;
1799 		}
1800 		ds = be32_to_cpu(wsame->size);
1801 	}
1802 
1803 	if (!expect(IS_ALIGNED(ds, 512)))
1804 		return NULL;
1805 	if (trim || wsame) {
1806 		if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1807 			return NULL;
1808 	} else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
1809 		return NULL;
1810 
1811 	/* even though we trust our peer,
1812 	 * we sometimes have to double check. */
1813 	if (sector + (ds>>9) > capacity) {
1814 		drbd_err(device, "request from peer beyond end of local disk: "
1815 			"capacity: %llus < sector: %llus + size: %u\n",
1816 			(unsigned long long)capacity,
1817 			(unsigned long long)sector, ds);
1818 		return NULL;
1819 	}
1820 
1821 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1822 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1823 	 * which in turn might block on the other node at this very place.  */
1824 	peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1825 	if (!peer_req)
1826 		return NULL;
1827 
1828 	peer_req->flags |= EE_WRITE;
1829 	if (trim) {
1830 		peer_req->flags |= EE_IS_TRIM;
1831 		return peer_req;
1832 	}
1833 	if (wsame)
1834 		peer_req->flags |= EE_WRITE_SAME;
1835 
1836 	/* receive payload size bytes into page chain */
1837 	ds = data_size;
1838 	page = peer_req->pages;
1839 	page_chain_for_each(page) {
1840 		unsigned len = min_t(int, ds, PAGE_SIZE);
1841 		data = kmap(page);
1842 		err = drbd_recv_all_warn(peer_device->connection, data, len);
1843 		if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1844 			drbd_err(device, "Fault injection: Corrupting data on receive\n");
1845 			data[0] = data[0] ^ (unsigned long)-1;
1846 		}
1847 		kunmap(page);
1848 		if (err) {
1849 			drbd_free_peer_req(device, peer_req);
1850 			return NULL;
1851 		}
1852 		ds -= len;
1853 	}
1854 
1855 	if (digest_size) {
1856 		drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1857 		if (memcmp(dig_in, dig_vv, digest_size)) {
1858 			drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1859 				(unsigned long long)sector, data_size);
1860 			drbd_free_peer_req(device, peer_req);
1861 			return NULL;
1862 		}
1863 	}
1864 	device->recv_cnt += data_size >> 9;
1865 	return peer_req;
1866 }
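
/*
 * For illustration: for a normal 4 KiB P_DATA write, data_size == ds
 * == 4096 and the payload is received into the page chain.  For a
 * P_TRIM covering 1 MiB, data_size is 0 (nothing on the wire) while
 * ds = be32_to_cpu(trim->size) == 1048576, so only the interval is
 * recorded and the function returns before the receive loop.
 */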
1867 
1868 /* drbd_drain_block() just takes a data block
1869  * out of the socket input buffer, and discards it.
1870  */
1871 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
1872 {
1873 	struct page *page;
1874 	int err = 0;
1875 	void *data;
1876 
1877 	if (!data_size)
1878 		return 0;
1879 
1880 	page = drbd_alloc_pages(peer_device, 1, 1);
1881 
1882 	data = kmap(page);
1883 	while (data_size) {
1884 		unsigned int len = min_t(int, data_size, PAGE_SIZE);
1885 
1886 		err = drbd_recv_all_warn(peer_device->connection, data, len);
1887 		if (err)
1888 			break;
1889 		data_size -= len;
1890 	}
1891 	kunmap(page);
1892 	drbd_free_pages(peer_device->device, page, 0);
1893 	return err;
1894 }
1895 
1896 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
1897 			   sector_t sector, int data_size)
1898 {
1899 	struct bio_vec bvec;
1900 	struct bvec_iter iter;
1901 	struct bio *bio;
1902 	int digest_size, err, expect;
1903 	void *dig_in = peer_device->connection->int_dig_in;
1904 	void *dig_vv = peer_device->connection->int_dig_vv;
1905 
1906 	digest_size = 0;
1907 	if (peer_device->connection->peer_integrity_tfm) {
1908 		digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
1909 		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1910 		if (err)
1911 			return err;
1912 		data_size -= digest_size;
1913 	}
1914 
1915 	/* optimistically update recv_cnt.  if receiving fails below,
1916 	 * we disconnect anyways, and counters will be reset. */
1917 	peer_device->device->recv_cnt += data_size>>9;
1918 
1919 	bio = req->master_bio;
1920 	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
1921 
1922 	bio_for_each_segment(bvec, bio, iter) {
1923 		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1924 		expect = min_t(int, data_size, bvec.bv_len);
1925 		err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
1926 		kunmap(bvec.bv_page);
1927 		if (err)
1928 			return err;
1929 		data_size -= expect;
1930 	}
1931 
1932 	if (digest_size) {
1933 		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
1934 		if (memcmp(dig_in, dig_vv, digest_size)) {
1935 			drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
1936 			return -EINVAL;
1937 		}
1938 	}
1939 
1940 	D_ASSERT(peer_device->device, data_size == 0);
1941 	return 0;
1942 }
1943 
1944 /*
1945  * e_end_resync_block() is called in ack_sender context via
1946  * drbd_finish_peer_reqs().
1947  */
1948 static int e_end_resync_block(struct drbd_work *w, int unused)
1949 {
1950 	struct drbd_peer_request *peer_req =
1951 		container_of(w, struct drbd_peer_request, w);
1952 	struct drbd_peer_device *peer_device = peer_req->peer_device;
1953 	struct drbd_device *device = peer_device->device;
1954 	sector_t sector = peer_req->i.sector;
1955 	int err;
1956 
1957 	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
1958 
1959 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1960 		drbd_set_in_sync(device, sector, peer_req->i.size);
1961 		err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
1962 	} else {
1963 		/* Record failure to sync */
1964 		drbd_rs_failed_io(device, sector, peer_req->i.size);
1965 
1966 		err  = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
1967 	}
1968 	dec_unacked(device);
1969 
1970 	return err;
1971 }
1972 
1973 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
1974 			    struct packet_info *pi) __releases(local)
1975 {
1976 	struct drbd_device *device = peer_device->device;
1977 	struct drbd_peer_request *peer_req;
1978 
1979 	peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
1980 	if (!peer_req)
1981 		goto fail;
1982 
1983 	dec_rs_pending(device);
1984 
1985 	inc_unacked(device);
1986 	/* corresponding dec_unacked() in e_end_resync_block()
1987 	 * respective _drbd_clear_done_ee */
1988 
1989 	peer_req->w.cb = e_end_resync_block;
1990 	peer_req->submit_jif = jiffies;
1991 
1992 	spin_lock_irq(&device->resource->req_lock);
1993 	list_add_tail(&peer_req->w.list, &device->sync_ee);
1994 	spin_unlock_irq(&device->resource->req_lock);
1995 
1996 	atomic_add(pi->size >> 9, &device->rs_sect_ev);
1997 	if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
1998 				     DRBD_FAULT_RS_WR) == 0)
1999 		return 0;
2000 
2001 	/* don't care for the reason here */
2002 	drbd_err(device, "submit failed, triggering re-connect\n");
2003 	spin_lock_irq(&device->resource->req_lock);
2004 	list_del(&peer_req->w.list);
2005 	spin_unlock_irq(&device->resource->req_lock);
2006 
2007 	drbd_free_peer_req(device, peer_req);
2008 fail:
2009 	put_ldev(device);
2010 	return -EIO;
2011 }
2012 
2013 static struct drbd_request *
2014 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2015 	     sector_t sector, bool missing_ok, const char *func)
2016 {
2017 	struct drbd_request *req;
2018 
2019 	/* Request object according to our peer */
2020 	req = (struct drbd_request *)(unsigned long)id;
2021 	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
2022 		return req;
2023 	if (!missing_ok) {
2024 		drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
2025 			(unsigned long)id, (unsigned long long)sector);
2026 	}
2027 	return NULL;
2028 }
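
/*
 * For illustration: block_id in reply packets is the value we
 * originally put on the wire; for our own requests that is the kernel
 * pointer of the struct drbd_request.  find_request() casts it back
 * and only trusts it after drbd_contains_interval() has confirmed
 * that exactly this interval is present in our tree at that sector.
 */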
2029 
2030 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
2031 {
2032 	struct drbd_peer_device *peer_device;
2033 	struct drbd_device *device;
2034 	struct drbd_request *req;
2035 	sector_t sector;
2036 	int err;
2037 	struct p_data *p = pi->data;
2038 
2039 	peer_device = conn_peer_device(connection, pi->vnr);
2040 	if (!peer_device)
2041 		return -EIO;
2042 	device = peer_device->device;
2043 
2044 	sector = be64_to_cpu(p->sector);
2045 
2046 	spin_lock_irq(&device->resource->req_lock);
2047 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2048 	spin_unlock_irq(&device->resource->req_lock);
2049 	if (unlikely(!req))
2050 		return -EIO;
2051 
2052 	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
2053 	 * special casing it there for the various failure cases.
2054 	 * still no race with drbd_fail_pending_reads */
2055 	err = recv_dless_read(peer_device, req, sector, pi->size);
2056 	if (!err)
2057 		req_mod(req, DATA_RECEIVED);
2058 	/* else: nothing. handled from drbd_disconnect...
2059 	 * I don't think we may complete this just yet
2060 	 * in case we are "on-disconnect: freeze" */
2061 
2062 	return err;
2063 }
2064 
2065 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
2066 {
2067 	struct drbd_peer_device *peer_device;
2068 	struct drbd_device *device;
2069 	sector_t sector;
2070 	int err;
2071 	struct p_data *p = pi->data;
2072 
2073 	peer_device = conn_peer_device(connection, pi->vnr);
2074 	if (!peer_device)
2075 		return -EIO;
2076 	device = peer_device->device;
2077 
2078 	sector = be64_to_cpu(p->sector);
2079 	D_ASSERT(device, p->block_id == ID_SYNCER);
2080 
2081 	if (get_ldev(device)) {
2082 		/* data is submitted to disk within recv_resync_read.
2083 		 * corresponding put_ldev done below on error,
2084 		 * or in drbd_peer_request_endio. */
2085 		err = recv_resync_read(peer_device, sector, pi);
2086 	} else {
2087 		if (__ratelimit(&drbd_ratelimit_state))
2088 			drbd_err(device, "Can not write resync data to local disk.\n");
2089 
2090 		err = drbd_drain_block(peer_device, pi->size);
2091 
2092 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2093 	}
2094 
2095 	atomic_add(pi->size >> 9, &device->rs_sect_in);
2096 
2097 	return err;
2098 }
2099 
2100 static void restart_conflicting_writes(struct drbd_device *device,
2101 				       sector_t sector, int size)
2102 {
2103 	struct drbd_interval *i;
2104 	struct drbd_request *req;
2105 
2106 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2107 		if (!i->local)
2108 			continue;
2109 		req = container_of(i, struct drbd_request, i);
2110 		if (req->rq_state & RQ_LOCAL_PENDING ||
2111 		    !(req->rq_state & RQ_POSTPONED))
2112 			continue;
2113 		/* as it is RQ_POSTPONED, this will cause it to
2114 		 * be queued on the retry workqueue. */
2115 		__req_mod(req, CONFLICT_RESOLVED, NULL);
2116 	}
2117 }
2118 
2119 /*
2120  * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
2121  */
2122 static int e_end_block(struct drbd_work *w, int cancel)
2123 {
2124 	struct drbd_peer_request *peer_req =
2125 		container_of(w, struct drbd_peer_request, w);
2126 	struct drbd_peer_device *peer_device = peer_req->peer_device;
2127 	struct drbd_device *device = peer_device->device;
2128 	sector_t sector = peer_req->i.sector;
2129 	int err = 0, pcmd;
2130 
2131 	if (peer_req->flags & EE_SEND_WRITE_ACK) {
2132 		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2133 			pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2134 				device->state.conn <= C_PAUSED_SYNC_T &&
2135 				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
2136 				P_RS_WRITE_ACK : P_WRITE_ACK;
2137 			err = drbd_send_ack(peer_device, pcmd, peer_req);
2138 			if (pcmd == P_RS_WRITE_ACK)
2139 				drbd_set_in_sync(device, sector, peer_req->i.size);
2140 		} else {
2141 			err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2142 			/* we expect it to be marked out of sync anyways...
2143 			 * maybe assert this?  */
2144 		}
2145 		dec_unacked(device);
2146 	}
2147 
2148 	/* we delete from the conflict detection hash _after_ we sent out the
2149 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
2150 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
2151 		spin_lock_irq(&device->resource->req_lock);
2152 		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2153 		drbd_remove_epoch_entry_interval(device, peer_req);
2154 		if (peer_req->flags & EE_RESTART_REQUESTS)
2155 			restart_conflicting_writes(device, sector, peer_req->i.size);
2156 		spin_unlock_irq(&device->resource->req_lock);
2157 	} else
2158 		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2159 
2160 	drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
2161 
2162 	return err;
2163 }
2164 
2165 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
2166 {
2167 	struct drbd_peer_request *peer_req =
2168 		container_of(w, struct drbd_peer_request, w);
2169 	struct drbd_peer_device *peer_device = peer_req->peer_device;
2170 	int err;
2171 
2172 	err = drbd_send_ack(peer_device, ack, peer_req);
2173 	dec_unacked(peer_device->device);
2174 
2175 	return err;
2176 }
2177 
2178 static int e_send_superseded(struct drbd_work *w, int unused)
2179 {
2180 	return e_send_ack(w, P_SUPERSEDED);
2181 }
2182 
2183 static int e_send_retry_write(struct drbd_work *w, int unused)
2184 {
2185 	struct drbd_peer_request *peer_req =
2186 		container_of(w, struct drbd_peer_request, w);
2187 	struct drbd_connection *connection = peer_req->peer_device->connection;
2188 
2189 	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
2190 			     P_RETRY_WRITE : P_SUPERSEDED);
2191 }
2192 
2193 static bool seq_greater(u32 a, u32 b)
2194 {
2195 	/*
2196 	 * We assume 32-bit wrap-around here.
2197 	 * For 24-bit wrap-around, we would have to shift:
2198 	 *  a <<= 8; b <<= 8;
2199 	 */
2200 	return (s32)a - (s32)b > 0;
2201 }
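
/*
 * For illustration: seq_greater(0x00000001, 0xffffffff) is true,
 * because (s32)1 - (s32)0xffffffff == 2 > 0, while a plain unsigned
 * compare would wrongly consider the wrapped sequence number older.
 */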
2202 
2203 static u32 seq_max(u32 a, u32 b)
2204 {
2205 	return seq_greater(a, b) ? a : b;
2206 }
2207 
2208 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
2209 {
2210 	struct drbd_device *device = peer_device->device;
2211 	unsigned int newest_peer_seq;
2212 
2213 	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
2214 		spin_lock(&device->peer_seq_lock);
2215 		newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2216 		device->peer_seq = newest_peer_seq;
2217 		spin_unlock(&device->peer_seq_lock);
2218 		/* wake up only if we actually changed device->peer_seq */
2219 		if (peer_seq == newest_peer_seq)
2220 			wake_up(&device->seq_wait);
2221 	}
2222 }
2223 
2224 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
2225 {
2226 	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2227 }
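
/*
 * For illustration: a 4 KiB request at sector 0 covers sectors
 * [0, 8), so overlaps(0, 4096, 7, 512) is true (sector 7 is inside),
 * while overlaps(0, 4096, 8, 512) is false: the ranges merely touch.
 */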
2228 
2229 /* maybe change sync_ee into interval trees as well? */
2230 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2231 {
2232 	struct drbd_peer_request *rs_req;
2233 	bool rv = false;
2234 
2235 	spin_lock_irq(&device->resource->req_lock);
2236 	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
2237 		if (overlaps(peer_req->i.sector, peer_req->i.size,
2238 			     rs_req->i.sector, rs_req->i.size)) {
2239 			rv = true;
2240 			break;
2241 		}
2242 	}
2243 	spin_unlock_irq(&device->resource->req_lock);
2244 
2245 	return rv;
2246 }
2247 
2248 /* Called from receive_Data.
2249  * Synchronize packets on sock with packets on msock.
2250  *
2251  * This is here so that even when a P_DATA packet traveling via sock overtakes an Ack
2252  * packet traveling on msock, they are still processed in the order they have
2253  * been sent.
2254  *
2255  * Note: we don't care for Ack packets overtaking P_DATA packets.
2256  *
2257  * In case packet_seq is larger than device->peer_seq number, there are
2258  * outstanding packets on the msock. We wait for them to arrive.
2259  * In case we are the logically next packet, we update device->peer_seq
2260  * ourselves. Correctly handles 32bit wrap around.
2261  *
2262  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
2263  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2264  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
2265  * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
2266  *
2267  * returns 0 if we may process the packet,
2268  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
2269 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
2270 {
2271 	struct drbd_device *device = peer_device->device;
2272 	DEFINE_WAIT(wait);
2273 	long timeout;
2274 	int ret = 0, tp;
2275 
2276 	if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
2277 		return 0;
2278 
2279 	spin_lock(&device->peer_seq_lock);
2280 	for (;;) {
2281 		if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2282 			device->peer_seq = seq_max(device->peer_seq, peer_seq);
2283 			break;
2284 		}
2285 
2286 		if (signal_pending(current)) {
2287 			ret = -ERESTARTSYS;
2288 			break;
2289 		}
2290 
2291 		rcu_read_lock();
2292 		tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
2293 		rcu_read_unlock();
2294 
2295 		if (!tp)
2296 			break;
2297 
2298 		/* Only need to wait if two_primaries is enabled */
2299 		prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2300 		spin_unlock(&device->peer_seq_lock);
2301 		rcu_read_lock();
2302 		timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
2303 		rcu_read_unlock();
2304 		timeout = schedule_timeout(timeout);
2305 		spin_lock(&device->peer_seq_lock);
2306 		if (!timeout) {
2307 			ret = -ETIMEDOUT;
2308 			drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2309 			break;
2310 		}
2311 	}
2312 	spin_unlock(&device->peer_seq_lock);
2313 	finish_wait(&device->seq_wait, &wait);
2314 	return ret;
2315 }
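
/*
 * For illustration: if device->peer_seq is 7 and a P_DATA with
 * seq_num 9 arrives on the data socket, seq_greater(9 - 1, 7) is
 * true, so packet 8 (or its ack on the meta socket) is still in
 * flight and we wait.  Once peer_seq has advanced to 8, packet 9 is
 * the logically next one and processing continues.
 */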
2316 
2317 /* see also bio_flags_to_wire()
2318  * DRBD_REQ_*, because we need to semantically map the flags to data packet
2319  * flags and back. We may replicate to other kernel versions. */
2320 static unsigned long wire_flags_to_bio_flags(u32 dpf)
2321 {
2322 	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2323 		(dpf & DP_FUA ? REQ_FUA : 0) |
2324 		(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
2325 }
2326 
2327 static unsigned long wire_flags_to_bio_op(u32 dpf)
2328 {
2329 	if (dpf & DP_DISCARD)
2330 		return REQ_OP_WRITE_ZEROES;
2331 	else
2332 		return REQ_OP_WRITE;
2333 }
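
/*
 * For illustration: dp_flags == (DP_RW_SYNC | DP_FUA) maps to
 * REQ_SYNC | REQ_FUA on the bio, DP_FLUSH maps to REQ_PREFLUSH, and
 * a set DP_DISCARD bit turns the whole request into
 * REQ_OP_WRITE_ZEROES instead of REQ_OP_WRITE.
 */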
2334 
2335 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2336 				    unsigned int size)
2337 {
2338 	struct drbd_interval *i;
2339 
2340     repeat:
2341 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2342 		struct drbd_request *req;
2343 		struct bio_and_error m;
2344 
2345 		if (!i->local)
2346 			continue;
2347 		req = container_of(i, struct drbd_request, i);
2348 		if (!(req->rq_state & RQ_POSTPONED))
2349 			continue;
2350 		req->rq_state &= ~RQ_POSTPONED;
2351 		__req_mod(req, NEG_ACKED, &m);
2352 		spin_unlock_irq(&device->resource->req_lock);
2353 		if (m.bio)
2354 			complete_master_bio(device, &m);
2355 		spin_lock_irq(&device->resource->req_lock);
2356 		goto repeat;
2357 	}
2358 }
2359 
2360 static int handle_write_conflicts(struct drbd_device *device,
2361 				  struct drbd_peer_request *peer_req)
2362 {
2363 	struct drbd_connection *connection = peer_req->peer_device->connection;
2364 	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2365 	sector_t sector = peer_req->i.sector;
2366 	const unsigned int size = peer_req->i.size;
2367 	struct drbd_interval *i;
2368 	bool equal;
2369 	int err;
2370 
2371 	/*
2372 	 * Inserting the peer request into the write_requests tree will prevent
2373 	 * new conflicting local requests from being added.
2374 	 */
2375 	drbd_insert_interval(&device->write_requests, &peer_req->i);
2376 
2377     repeat:
2378 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2379 		if (i == &peer_req->i)
2380 			continue;
2381 		if (i->completed)
2382 			continue;
2383 
2384 		if (!i->local) {
2385 			/*
2386 			 * Our peer has sent a conflicting remote request; this
2387 			 * should not happen in a two-node setup.  Wait for the
2388 			 * earlier peer request to complete.
2389 			 */
2390 			err = drbd_wait_misc(device, i);
2391 			if (err)
2392 				goto out;
2393 			goto repeat;
2394 		}
2395 
2396 		equal = i->sector == sector && i->size == size;
2397 		if (resolve_conflicts) {
2398 			/*
2399 			 * If the peer request is fully contained within the
2400 			 * overlapping request, it can be considered overwritten
2401 			 * and thus superseded; otherwise, it will be retried
2402 			 * once all overlapping requests have completed.
2403 			 */
2404 			bool superseded = i->sector <= sector && i->sector +
2405 				       (i->size >> 9) >= sector + (size >> 9);
2406 
2407 			if (!equal)
2408 				drbd_alert(device, "Concurrent writes detected: "
2409 					       "local=%llus +%u, remote=%llus +%u, "
2410 					       "assuming %s came first\n",
2411 					  (unsigned long long)i->sector, i->size,
2412 					  (unsigned long long)sector, size,
2413 					  superseded ? "local" : "remote");
2414 
2415 			peer_req->w.cb = superseded ? e_send_superseded :
2416 						   e_send_retry_write;
2417 			list_add_tail(&peer_req->w.list, &device->done_ee);
2418 			queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2419 
2420 			err = -ENOENT;
2421 			goto out;
2422 		} else {
2423 			struct drbd_request *req =
2424 				container_of(i, struct drbd_request, i);
2425 
2426 			if (!equal)
2427 				drbd_alert(device, "Concurrent writes detected: "
2428 					       "local=%llus +%u, remote=%llus +%u\n",
2429 					  (unsigned long long)i->sector, i->size,
2430 					  (unsigned long long)sector, size);
2431 
2432 			if (req->rq_state & RQ_LOCAL_PENDING ||
2433 			    !(req->rq_state & RQ_POSTPONED)) {
2434 				/*
2435 				 * Wait for the node with the discard flag to
2436 				 * decide if this request has been superseded
2437 				 * or needs to be retried.
2438 				 * Requests that have been superseded will
2439 				 * disappear from the write_requests tree.
2440 				 *
2441 				 * In addition, wait for the conflicting
2442 				 * request to finish locally before submitting
2443 				 * the conflicting peer request.
2444 				 */
2445 				err = drbd_wait_misc(device, &req->i);
2446 				if (err) {
2447 					_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
2448 					fail_postponed_requests(device, sector, size);
2449 					goto out;
2450 				}
2451 				goto repeat;
2452 			}
2453 			/*
2454 			 * Remember to restart the conflicting requests after
2455 			 * the new peer request has completed.
2456 			 */
2457 			peer_req->flags |= EE_RESTART_REQUESTS;
2458 		}
2459 	}
2460 	err = 0;
2461 
2462     out:
2463 	if (err)
2464 		drbd_remove_epoch_entry_interval(device, peer_req);
2465 	return err;
2466 }
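
/*
 * For illustration of the "superseded" test: a local 8 KiB write at
 * sector 16 (covering [16, 32)) fully contains a peer write of 4 KiB
 * at sector 20 (covering [20, 28)), so the peer request is answered
 * with P_SUPERSEDED.  A peer write at sector 28 covering [28, 36)
 * only partially overlaps and is asked to be retried instead.
 */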
2467 
2468 /* mirrored write */
2469 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
2470 {
2471 	struct drbd_peer_device *peer_device;
2472 	struct drbd_device *device;
2473 	struct net_conf *nc;
2474 	sector_t sector;
2475 	struct drbd_peer_request *peer_req;
2476 	struct p_data *p = pi->data;
2477 	u32 peer_seq = be32_to_cpu(p->seq_num);
2478 	int op, op_flags;
2479 	u32 dp_flags;
2480 	int err, tp;
2481 
2482 	peer_device = conn_peer_device(connection, pi->vnr);
2483 	if (!peer_device)
2484 		return -EIO;
2485 	device = peer_device->device;
2486 
2487 	if (!get_ldev(device)) {
2488 		int err2;
2489 
2490 		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2491 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2492 		atomic_inc(&connection->current_epoch->epoch_size);
2493 		err2 = drbd_drain_block(peer_device, pi->size);
2494 		if (!err)
2495 			err = err2;
2496 		return err;
2497 	}
2498 
2499 	/*
2500 	 * Corresponding put_ldev done either below (on various errors), or in
2501 	 * drbd_peer_request_endio, if we successfully submit the data at the
2502 	 * end of this function.
2503 	 */
2504 
2505 	sector = be64_to_cpu(p->sector);
2506 	peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2507 	if (!peer_req) {
2508 		put_ldev(device);
2509 		return -EIO;
2510 	}
2511 
2512 	peer_req->w.cb = e_end_block;
2513 	peer_req->submit_jif = jiffies;
2514 	peer_req->flags |= EE_APPLICATION;
2515 
2516 	dp_flags = be32_to_cpu(p->dp_flags);
2517 	op = wire_flags_to_bio_op(dp_flags);
2518 	op_flags = wire_flags_to_bio_flags(dp_flags);
2519 	if (pi->cmd == P_TRIM) {
2520 		D_ASSERT(peer_device, peer_req->i.size > 0);
2521 		D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
2522 		D_ASSERT(peer_device, peer_req->pages == NULL);
2523 	} else if (peer_req->pages == NULL) {
2524 		D_ASSERT(device, peer_req->i.size == 0);
2525 		D_ASSERT(device, dp_flags & DP_FLUSH);
2526 	}
2527 
2528 	if (dp_flags & DP_MAY_SET_IN_SYNC)
2529 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
2530 
2531 	spin_lock(&connection->epoch_lock);
2532 	peer_req->epoch = connection->current_epoch;
2533 	atomic_inc(&peer_req->epoch->epoch_size);
2534 	atomic_inc(&peer_req->epoch->active);
2535 	spin_unlock(&connection->epoch_lock);
2536 
2537 	rcu_read_lock();
2538 	nc = rcu_dereference(peer_device->connection->net_conf);
2539 	tp = nc->two_primaries;
2540 	if (peer_device->connection->agreed_pro_version < 100) {
2541 		switch (nc->wire_protocol) {
2542 		case DRBD_PROT_C:
2543 			dp_flags |= DP_SEND_WRITE_ACK;
2544 			break;
2545 		case DRBD_PROT_B:
2546 			dp_flags |= DP_SEND_RECEIVE_ACK;
2547 			break;
2548 		}
2549 	}
2550 	rcu_read_unlock();
2551 
2552 	if (dp_flags & DP_SEND_WRITE_ACK) {
2553 		peer_req->flags |= EE_SEND_WRITE_ACK;
2554 		inc_unacked(device);
2555 		/* corresponding dec_unacked() in e_end_block()
2556 		 * respective _drbd_clear_done_ee */
2557 	}
2558 
2559 	if (dp_flags & DP_SEND_RECEIVE_ACK) {
2560 		/* I really don't like it that the receiver thread
2561 		 * sends on the msock, but anyways */
2562 		drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2563 	}
2564 
2565 	if (tp) {
2566 		/* two primaries implies protocol C */
2567 		D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2568 		peer_req->flags |= EE_IN_INTERVAL_TREE;
2569 		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2570 		if (err)
2571 			goto out_interrupted;
2572 		spin_lock_irq(&device->resource->req_lock);
2573 		err = handle_write_conflicts(device, peer_req);
2574 		if (err) {
2575 			spin_unlock_irq(&device->resource->req_lock);
2576 			if (err == -ENOENT) {
2577 				put_ldev(device);
2578 				return 0;
2579 			}
2580 			goto out_interrupted;
2581 		}
2582 	} else {
2583 		update_peer_seq(peer_device, peer_seq);
2584 		spin_lock_irq(&device->resource->req_lock);
2585 	}
2586 	/* TRIM and WRITE_SAME are processed synchronously; we wait
2587 	 * for all pending requests, i.e. for active_ee to become
2588 	 * empty, in drbd_submit_peer_request();
2589 	 * better not add ourselves here. */
2590 	if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
2591 		list_add_tail(&peer_req->w.list, &device->active_ee);
2592 	spin_unlock_irq(&device->resource->req_lock);
2593 
2594 	if (device->state.conn == C_SYNC_TARGET)
2595 		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2596 
2597 	if (device->state.pdsk < D_INCONSISTENT) {
2598 		/* In case we have the only disk of the cluster, the peer is out of date: */
2599 		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2600 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2601 		drbd_al_begin_io(device, &peer_req->i);
2602 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2603 	}
2604 
2605 	err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2606 				       DRBD_FAULT_DT_WR);
2607 	if (!err)
2608 		return 0;
2609 
2610 	/* don't care for the reason here */
2611 	drbd_err(device, "submit failed, triggering re-connect\n");
2612 	spin_lock_irq(&device->resource->req_lock);
2613 	list_del(&peer_req->w.list);
2614 	drbd_remove_epoch_entry_interval(device, peer_req);
2615 	spin_unlock_irq(&device->resource->req_lock);
2616 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2617 		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2618 		drbd_al_complete_io(device, &peer_req->i);
2619 	}
2620 
2621 out_interrupted:
2622 	drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2623 	put_ldev(device);
2624 	drbd_free_peer_req(device, peer_req);
2625 	return err;
2626 }
2627 
2628 /* We may throttle resync, if the lower device seems to be busy,
2629  * and current sync rate is above c_min_rate.
2630  *
2631  * To decide whether or not the lower device is busy, we use a scheme similar
2632  * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2633  * (more than 64 sectors) activity that we cannot account for with our own resync
2634  * activity, it obviously is "busy".
2635  *
2636  * The current sync rate used here uses only the most recent two step marks,
2637  * to have a short time average so we can react faster.
2638  */
2639 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2640 		bool throttle_if_app_is_waiting)
2641 {
2642 	struct lc_element *tmp;
2643 	bool throttle = drbd_rs_c_min_rate_throttle(device);
2644 
2645 	if (!throttle || throttle_if_app_is_waiting)
2646 		return throttle;
2647 
2648 	spin_lock_irq(&device->al_lock);
2649 	tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2650 	if (tmp) {
2651 		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2652 		if (test_bit(BME_PRIORITY, &bm_ext->flags))
2653 			throttle = false;
2654 		/* Do not slow down if app IO is already waiting for this extent,
2655 		 * and our progress is necessary for application IO to complete. */
2656 	}
2657 	spin_unlock_irq(&device->al_lock);
2658 
2659 	return throttle;
2660 }
2661 
2662 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2663 {
2664 	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
2665 	unsigned long db, dt, dbdt;
2666 	unsigned int c_min_rate;
2667 	int curr_events;
2668 
2669 	rcu_read_lock();
2670 	c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2671 	rcu_read_unlock();
2672 
2673 	/* feature disabled? */
2674 	if (c_min_rate == 0)
2675 		return false;
2676 
2677 	curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
2678 			atomic_read(&device->rs_sect_ev);
2679 
2680 	if (atomic_read(&device->ap_actlog_cnt)
2681 	    || curr_events - device->rs_last_events > 64) {
2682 		unsigned long rs_left;
2683 		int i;
2684 
2685 		device->rs_last_events = curr_events;
2686 
2687 		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2688 		 * approx. */
2689 		i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2690 
2691 		if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2692 			rs_left = device->ov_left;
2693 		else
2694 			rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2695 
2696 		dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2697 		if (!dt)
2698 			dt++;
2699 		db = device->rs_mark_left[i] - rs_left;
2700 		dbdt = Bit2KB(db/dt);
2701 
2702 		if (dbdt > c_min_rate)
2703 			return true;
2704 	}
2705 	return false;
2706 }
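
/*
 * For illustration, assuming the usual 4 KiB bitmap granularity: if
 * the out-of-sync count dropped by db = 3000 bits over dt = 6
 * seconds, dbdt = Bit2KB(3000 / 6) = 2000 KiB/s.  With c_min_rate
 * configured at, say, 250 KiB/s this returns true and the resync is
 * throttled in favor of application I/O.
 */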
2707 
2708 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2709 {
2710 	struct drbd_peer_device *peer_device;
2711 	struct drbd_device *device;
2712 	sector_t sector;
2713 	sector_t capacity;
2714 	struct drbd_peer_request *peer_req;
2715 	struct digest_info *di = NULL;
2716 	int size, verb;
2717 	unsigned int fault_type;
2718 	struct p_block_req *p =	pi->data;
2719 
2720 	peer_device = conn_peer_device(connection, pi->vnr);
2721 	if (!peer_device)
2722 		return -EIO;
2723 	device = peer_device->device;
2724 	capacity = drbd_get_capacity(device->this_bdev);
2725 
2726 	sector = be64_to_cpu(p->sector);
2727 	size   = be32_to_cpu(p->blksize);
2728 
2729 	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2730 		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2731 				(unsigned long long)sector, size);
2732 		return -EINVAL;
2733 	}
2734 	if (sector + (size>>9) > capacity) {
2735 		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2736 				(unsigned long long)sector, size);
2737 		return -EINVAL;
2738 	}
2739 
2740 	if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2741 		verb = 1;
2742 		switch (pi->cmd) {
2743 		case P_DATA_REQUEST:
2744 			drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2745 			break;
2746 		case P_RS_THIN_REQ:
2747 		case P_RS_DATA_REQUEST:
2748 		case P_CSUM_RS_REQUEST:
2749 		case P_OV_REQUEST:
2750 			drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
2751 			break;
2752 		case P_OV_REPLY:
2753 			verb = 0;
2754 			dec_rs_pending(device);
2755 			drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2756 			break;
2757 		default:
2758 			BUG();
2759 		}
2760 		if (verb && __ratelimit(&drbd_ratelimit_state))
2761 			drbd_err(device, "Can not satisfy peer's read request, "
2762 			    "no local data.\n");
2763 
2764 		/* drain possible payload */
2765 		return drbd_drain_block(peer_device, pi->size);
2766 	}
2767 
2768 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2769 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
2770 	 * which in turn might block on the other node at this very place.  */
2771 	peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2772 			size, GFP_NOIO);
2773 	if (!peer_req) {
2774 		put_ldev(device);
2775 		return -ENOMEM;
2776 	}
2777 
2778 	switch (pi->cmd) {
2779 	case P_DATA_REQUEST:
2780 		peer_req->w.cb = w_e_end_data_req;
2781 		fault_type = DRBD_FAULT_DT_RD;
2782 		/* application IO, don't drbd_rs_begin_io */
2783 		peer_req->flags |= EE_APPLICATION;
2784 		goto submit;
2785 
2786 	case P_RS_THIN_REQ:
2787 		/* If at some point in the future we have a smart way to
2788 		   find out if this data block is completely deallocated,
2789 		   then we would do something smarter here than reading
2790 		   the block... */
2791 		peer_req->flags |= EE_RS_THIN_REQ;
2792 		/* fall through */
2793 	case P_RS_DATA_REQUEST:
2794 		peer_req->w.cb = w_e_end_rsdata_req;
2795 		fault_type = DRBD_FAULT_RS_RD;
2796 		/* used in the sector offset progress display */
2797 		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2798 		break;
2799 
2800 	case P_OV_REPLY:
2801 	case P_CSUM_RS_REQUEST:
2802 		fault_type = DRBD_FAULT_RS_RD;
2803 		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2804 		if (!di)
2805 			goto out_free_e;
2806 
2807 		di->digest_size = pi->size;
2808 		di->digest = (((char *)di)+sizeof(struct digest_info));
2809 
2810 		peer_req->digest = di;
2811 		peer_req->flags |= EE_HAS_DIGEST;
2812 
2813 		if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
2814 			goto out_free_e;
2815 
2816 		if (pi->cmd == P_CSUM_RS_REQUEST) {
2817 			D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2818 			peer_req->w.cb = w_e_end_csum_rs_req;
2819 			/* used in the sector offset progress display */
2820 			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2821 			/* remember to report stats in drbd_resync_finished */
2822 			device->use_csums = true;
2823 		} else if (pi->cmd == P_OV_REPLY) {
2824 			/* track progress, we may need to throttle */
2825 			atomic_add(size >> 9, &device->rs_sect_in);
2826 			peer_req->w.cb = w_e_end_ov_reply;
2827 			dec_rs_pending(device);
2828 			/* drbd_rs_begin_io done when we sent this request,
2829 			 * but accounting still needs to be done. */
2830 			goto submit_for_resync;
2831 		}
2832 		break;
2833 
2834 	case P_OV_REQUEST:
2835 		if (device->ov_start_sector == ~(sector_t)0 &&
2836 		    peer_device->connection->agreed_pro_version >= 90) {
2837 			unsigned long now = jiffies;
2838 			int i;
2839 			device->ov_start_sector = sector;
2840 			device->ov_position = sector;
2841 			device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2842 			device->rs_total = device->ov_left;
2843 			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2844 				device->rs_mark_left[i] = device->ov_left;
2845 				device->rs_mark_time[i] = now;
2846 			}
2847 			drbd_info(device, "Online Verify start sector: %llu\n",
2848 					(unsigned long long)sector);
2849 		}
2850 		peer_req->w.cb = w_e_end_ov_req;
2851 		fault_type = DRBD_FAULT_RS_RD;
2852 		break;
2853 
2854 	default:
2855 		BUG();
2856 	}
2857 
2858 	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
2859 	 * wrt the receiver, but it is not as straightforward as it may seem.
2860 	 * Various places in the resync start and stop logic assume resync
2861 	 * requests are processed in order, requeuing this on the worker thread
2862 	 * introduces a bunch of new code for synchronization between threads.
2863 	 *
2864 	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2865 	 * "forever", throttling after drbd_rs_begin_io will lock that extent
2866 	 * for application writes for the same time.  For now, just throttle
2867 	 * here, where the rest of the code expects the receiver to sleep for
2868 	 * a while, anyways.
2869 	 */
2870 
2871 	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
2872 	 * this defers syncer requests for some time, before letting at least
2873 	 * one request through.  The resync controller on the receiving side
2874 	 * will adapt to the incoming rate accordingly.
2875 	 *
2876 	 * We cannot throttle here if remote is Primary/SyncTarget:
2877 	 * we would also throttle its application reads.
2878 	 * In that case, throttling is done on the SyncTarget only.
2879 	 */
2880 
2881 	/* Even though this may be a resync request, we do add to "read_ee";
2882 	 * "sync_ee" is only used for resync WRITEs.
2883 	 * Add to list early, so debugfs can find this request
2884 	 * even if we have to sleep below. */
2885 	spin_lock_irq(&device->resource->req_lock);
2886 	list_add_tail(&peer_req->w.list, &device->read_ee);
2887 	spin_unlock_irq(&device->resource->req_lock);
2888 
2889 	update_receiver_timing_details(connection, drbd_rs_should_slow_down);
2890 	if (device->state.peer != R_PRIMARY
2891 	&& drbd_rs_should_slow_down(device, sector, false))
2892 		schedule_timeout_uninterruptible(HZ/10);
2893 	update_receiver_timing_details(connection, drbd_rs_begin_io);
2894 	if (drbd_rs_begin_io(device, sector))
2895 		goto out_free_e;
2896 
2897 submit_for_resync:
2898 	atomic_add(size >> 9, &device->rs_sect_ev);
2899 
2900 submit:
2901 	update_receiver_timing_details(connection, drbd_submit_peer_request);
2902 	inc_unacked(device);
2903 	if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
2904 				     fault_type) == 0)
2905 		return 0;
2906 
2907 	/* don't care for the reason here */
2908 	drbd_err(device, "submit failed, triggering re-connect\n");
2909 
2910 out_free_e:
2911 	spin_lock_irq(&device->resource->req_lock);
2912 	list_del(&peer_req->w.list);
2913 	spin_unlock_irq(&device->resource->req_lock);
2914 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
2915 
2916 	put_ldev(device);
2917 	drbd_free_peer_req(device, peer_req);
2918 	return -EIO;
2919 }
2920 
2921 /**
2922  * drbd_asb_recover_0p  -  Recover after split-brain with no remaining primaries
2923  */
2924 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
2925 {
2926 	struct drbd_device *device = peer_device->device;
2927 	int self, peer, rv = -100;
2928 	unsigned long ch_self, ch_peer;
2929 	enum drbd_after_sb_p after_sb_0p;
2930 
2931 	self = device->ldev->md.uuid[UI_BITMAP] & 1;
2932 	peer = device->p_uuid[UI_BITMAP] & 1;
2933 
2934 	ch_peer = device->p_uuid[UI_SIZE];
2935 	ch_self = device->comm_bm_set;
2936 
2937 	rcu_read_lock();
2938 	after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
2939 	rcu_read_unlock();
2940 	switch (after_sb_0p) {
2941 	case ASB_CONSENSUS:
2942 	case ASB_DISCARD_SECONDARY:
2943 	case ASB_CALL_HELPER:
2944 	case ASB_VIOLENTLY:
2945 		drbd_err(device, "Configuration error.\n");
2946 		break;
2947 	case ASB_DISCONNECT:
2948 		break;
2949 	case ASB_DISCARD_YOUNGER_PRI:
2950 		if (self == 0 && peer == 1) {
2951 			rv = -1;
2952 			break;
2953 		}
2954 		if (self == 1 && peer == 0) {
2955 			rv =  1;
2956 			break;
2957 		}
2958 		/* Else fall through to one of the other strategies... */
2959 	case ASB_DISCARD_OLDER_PRI:
2960 		if (self == 0 && peer == 1) {
2961 			rv = 1;
2962 			break;
2963 		}
2964 		if (self == 1 && peer == 0) {
2965 			rv = -1;
2966 			break;
2967 		}
2968 		/* Else fall through to one of the other strategies... */
2969 		drbd_warn(device, "Discard younger/older primary did not find a decision\n"
2970 		     "Using discard-least-changes instead\n");
2971 		/* fall through */
2972 	case ASB_DISCARD_ZERO_CHG:
2973 		if (ch_peer == 0 && ch_self == 0) {
2974 			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
2975 				? -1 : 1;
2976 			break;
2977 		} else {
2978 			if (ch_peer == 0) { rv =  1; break; }
2979 			if (ch_self == 0) { rv = -1; break; }
2980 		}
2981 		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2982 			break;
2983 		/* else: fall through */
2984 	case ASB_DISCARD_LEAST_CHG:
2985 		if	(ch_self < ch_peer)
2986 			rv = -1;
2987 		else if (ch_self > ch_peer)
2988 			rv =  1;
2989 		else /* ( ch_self == ch_peer ) */
2990 		     /* Well, then use something else. */
2991 			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
2992 				? -1 : 1;
2993 		break;
2994 	case ASB_DISCARD_LOCAL:
2995 		rv = -1;
2996 		break;
2997 	case ASB_DISCARD_REMOTE:
2998 		rv =  1;
2999 	}
3000 
3001 	return rv;
3002 }
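
/*
 * For illustration: with after-sb-0pri set to discard-least-changes,
 * a node that wrote ch_self = 12 blocks while the peer wrote
 * ch_peer = 3400 returns rv = -1 and becomes sync target, i.e. the
 * side with fewer changes is overwritten.
 */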
3003 
3004 /**
3005  * drbd_asb_recover_1p  -  Recover after split-brain with one remaining primary
3006  */
3007 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
3008 {
3009 	struct drbd_device *device = peer_device->device;
3010 	int hg, rv = -100;
3011 	enum drbd_after_sb_p after_sb_1p;
3012 
3013 	rcu_read_lock();
3014 	after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
3015 	rcu_read_unlock();
3016 	switch (after_sb_1p) {
3017 	case ASB_DISCARD_YOUNGER_PRI:
3018 	case ASB_DISCARD_OLDER_PRI:
3019 	case ASB_DISCARD_LEAST_CHG:
3020 	case ASB_DISCARD_LOCAL:
3021 	case ASB_DISCARD_REMOTE:
3022 	case ASB_DISCARD_ZERO_CHG:
3023 		drbd_err(device, "Configuration error.\n");
3024 		break;
3025 	case ASB_DISCONNECT:
3026 		break;
3027 	case ASB_CONSENSUS:
3028 		hg = drbd_asb_recover_0p(peer_device);
3029 		if (hg == -1 && device->state.role == R_SECONDARY)
3030 			rv = hg;
3031 		if (hg == 1  && device->state.role == R_PRIMARY)
3032 			rv = hg;
3033 		break;
3034 	case ASB_VIOLENTLY:
3035 		rv = drbd_asb_recover_0p(peer_device);
3036 		break;
3037 	case ASB_DISCARD_SECONDARY:
3038 		return device->state.role == R_PRIMARY ? 1 : -1;
3039 	case ASB_CALL_HELPER:
3040 		hg = drbd_asb_recover_0p(peer_device);
3041 		if (hg == -1 && device->state.role == R_PRIMARY) {
3042 			enum drbd_state_rv rv2;
3043 
3044 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3045 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
3046 			  * we do not need to wait for the after state change work either. */
3047 			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3048 			if (rv2 != SS_SUCCESS) {
3049 				drbd_khelper(device, "pri-lost-after-sb");
3050 			} else {
3051 				drbd_warn(device, "Successfully gave up primary role.\n");
3052 				rv = hg;
3053 			}
3054 		} else
3055 			rv = hg;
3056 	}
3057 
3058 	return rv;
3059 }
3060 
3061 /**
3062  * drbd_asb_recover_2p  -  Recover after split-brain with two remaining primaries
3063  */
3064 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
3065 {
3066 	struct drbd_device *device = peer_device->device;
3067 	int hg, rv = -100;
3068 	enum drbd_after_sb_p after_sb_2p;
3069 
3070 	rcu_read_lock();
3071 	after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
3072 	rcu_read_unlock();
3073 	switch (after_sb_2p) {
3074 	case ASB_DISCARD_YOUNGER_PRI:
3075 	case ASB_DISCARD_OLDER_PRI:
3076 	case ASB_DISCARD_LEAST_CHG:
3077 	case ASB_DISCARD_LOCAL:
3078 	case ASB_DISCARD_REMOTE:
3079 	case ASB_CONSENSUS:
3080 	case ASB_DISCARD_SECONDARY:
3081 	case ASB_DISCARD_ZERO_CHG:
3082 		drbd_err(device, "Configuration error.\n");
3083 		break;
3084 	case ASB_VIOLENTLY:
3085 		rv = drbd_asb_recover_0p(peer_device);
3086 		break;
3087 	case ASB_DISCONNECT:
3088 		break;
3089 	case ASB_CALL_HELPER:
3090 		hg = drbd_asb_recover_0p(peer_device);
3091 		if (hg == -1) {
3092 			enum drbd_state_rv rv2;
3093 
3094 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3095 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
3096 			  * we do not need to wait for the after state change work either. */
3097 			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3098 			if (rv2 != SS_SUCCESS) {
3099 				drbd_khelper(device, "pri-lost-after-sb");
3100 			} else {
3101 				drbd_warn(device, "Successfully gave up primary role.\n");
3102 				rv = hg;
3103 			}
3104 		} else
3105 			rv = hg;
3106 	}
3107 
3108 	return rv;
3109 }
3110 
3111 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
3112 			   u64 bits, u64 flags)
3113 {
3114 	if (!uuid) {
3115 		drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
3116 		return;
3117 	}
3118 	drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
3119 	     text,
3120 	     (unsigned long long)uuid[UI_CURRENT],
3121 	     (unsigned long long)uuid[UI_BITMAP],
3122 	     (unsigned long long)uuid[UI_HISTORY_START],
3123 	     (unsigned long long)uuid[UI_HISTORY_END],
3124 	     (unsigned long long)bits,
3125 	     (unsigned long long)flags);
3126 }
3127 
3128 /*
3129   100	after split brain try auto recover
3130     2	C_SYNC_SOURCE set BitMap
3131     1	C_SYNC_SOURCE use BitMap
3132     0	no Sync
3133    -1	C_SYNC_TARGET use BitMap
3134    -2	C_SYNC_TARGET set BitMap
3135  -100	after split brain, disconnect
3136 -1000	unrelated data
3137 -1091   requires proto 91
3138 -1096   requires proto 96
3139  */
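
/*
 * For illustration: a return value of -1 makes this node sync target
 * using the bitmap (typical after the peer continued as primary while
 * we were down), while -1091 tells the caller that resolving the UUID
 * history would need at least protocol version 91 on both sides, so
 * the connection is dropped.
 */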
3140 
3141 static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
3142 {
3143 	struct drbd_peer_device *const peer_device = first_peer_device(device);
3144 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
3145 	u64 self, peer;
3146 	int i, j;
3147 
3148 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3149 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3150 
3151 	*rule_nr = 10;
3152 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3153 		return 0;
3154 
3155 	*rule_nr = 20;
3156 	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3157 	     peer != UUID_JUST_CREATED)
3158 		return -2;
3159 
3160 	*rule_nr = 30;
3161 	if (self != UUID_JUST_CREATED &&
3162 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
3163 		return 2;
3164 
3165 	if (self == peer) {
3166 		int rct, dc; /* roles at crash time */
3167 
3168 		if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
3169 
3170 			if (connection->agreed_pro_version < 91)
3171 				return -1091;
3172 
3173 			if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3174 			    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
3175 				drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
3176 				drbd_uuid_move_history(device);
3177 				device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3178 				device->ldev->md.uuid[UI_BITMAP] = 0;
3179 
3180 				drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3181 					       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3182 				*rule_nr = 34;
3183 			} else {
3184 				drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3185 				*rule_nr = 36;
3186 			}
3187 
3188 			return 1;
3189 		}
3190 
3191 		if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3192 
3193 			if (connection->agreed_pro_version < 91)
3194 				return -1091;
3195 
3196 			if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3197 			    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3198 				drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3199 
3200 				device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3201 				device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3202 				device->p_uuid[UI_BITMAP] = 0UL;
3203 
3204 				drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3205 				*rule_nr = 35;
3206 			} else {
3207 				drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3208 				*rule_nr = 37;
3209 			}
3210 
3211 			return -1;
3212 		}
3213 
3214 		/* Common power [off|failure] */
3215 		rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3216 			(device->p_uuid[UI_FLAGS] & 2);
3217 		/* lowest bit is set when we were primary,
3218 		 * next bit (weight 2) is set when peer was primary */
3219 		*rule_nr = 40;
3220 
3221 		/* Neither has the "crashed primary" flag set,
3222 		 * only a replication link hiccup. */
3223 		if (rct == 0)
3224 			return 0;
3225 
3226 		/* Current UUID equal and no bitmap uuid; does not necessarily
3227 		 * mean this was a "simultaneous hard crash", maybe IO was
3228 		 * frozen, so no UUID-bump happened.
3229 		 * This is a protocol change, overload DRBD_FF_WSAME as flag
3230 		 * for "new-enough" peer DRBD version. */
3231 		if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3232 			*rule_nr = 41;
3233 			if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3234 				drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3235 				return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3236 			}
3237 			if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3238 				/* At least one has the "crashed primary" bit set,
3239 				 * both are primary now, but neither has rotated its UUIDs?
3240 				 * "Can not happen." */
3241 				drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3242 				return -100;
3243 			}
3244 			if (device->state.role == R_PRIMARY)
3245 				return 1;
3246 			return -1;
3247 		}
3248 
3249 		/* Both are secondary.
3250 		 * Really looks like recovery from simultaneous hard crash.
3251 		 * Check which had been primary before, and arbitrate. */
3252 		switch (rct) {
3253 		case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
3254 		case 1: /*  self_pri && !peer_pri */ return 1;
3255 		case 2: /* !self_pri &&  peer_pri */ return -1;
3256 		case 3: /*  self_pri &&  peer_pri */
3257 			dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
3258 			return dc ? -1 : 1;
3259 		}
3260 	}
3261 
3262 	*rule_nr = 50;
3263 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3264 	if (self == peer)
3265 		return -1;
3266 
3267 	*rule_nr = 51;
3268 	peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3269 	if (self == peer) {
3270 		if (connection->agreed_pro_version < 96 ?
3271 		    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3272 		    (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3273 		    peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3274 			/* The last P_SYNC_UUID did not get through. Undo the modifications
3275 			   that the last start of a resync as sync source made to the peer's UUIDs. */
3276 
3277 			if (connection->agreed_pro_version < 91)
3278 				return -1091;
3279 
3280 			device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3281 			device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3282 
3283 			drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3284 			drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3285 
3286 			return -1;
3287 		}
3288 	}
3289 
3290 	*rule_nr = 60;
3291 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3292 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3293 		peer = device->p_uuid[i] & ~((u64)1);
3294 		if (self == peer)
3295 			return -2;
3296 	}
3297 
3298 	*rule_nr = 70;
3299 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3300 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3301 	if (self == peer)
3302 		return 1;
3303 
3304 	*rule_nr = 71;
3305 	self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3306 	if (self == peer) {
3307 		if (connection->agreed_pro_version < 96 ?
3308 		    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3309 		    (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3310 		    self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3311 			/* The last P_SYNC_UUID did not get through. Undo the modifications
3312 			   that the last start of a resync as sync source made to our UUIDs. */
3313 
3314 			if (connection->agreed_pro_version < 91)
3315 				return -1091;
3316 
3317 			__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3318 			__drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3319 
3320 			drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3321 			drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3322 				       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3323 
3324 			return 1;
3325 		}
3326 	}
3327 
3328 
3329 	*rule_nr = 80;
3330 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3331 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3332 		self = device->ldev->md.uuid[i] & ~((u64)1);
3333 		if (self == peer)
3334 			return 2;
3335 	}
3336 
3337 	*rule_nr = 90;
3338 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3339 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3340 	if (self == peer && self != ((u64)0))
3341 		return 100;
3342 
3343 	*rule_nr = 100;
3344 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3345 		self = device->ldev->md.uuid[i] & ~((u64)1);
3346 		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
3347 			peer = device->p_uuid[j] & ~((u64)1);
3348 			if (self == peer)
3349 				return -100;
3350 		}
3351 	}
3352 
3353 	return -1000;
3354 }
3355 
3356 /* drbd_sync_handshake() returns the new conn state on success, or
3357    CONN_MASK (-1) on failure.
3358  */
3359 static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3360 					   enum drbd_role peer_role,
3361 					   enum drbd_disk_state peer_disk) __must_hold(local)
3362 {
3363 	struct drbd_device *device = peer_device->device;
3364 	enum drbd_conns rv = C_MASK;
3365 	enum drbd_disk_state mydisk;
3366 	struct net_conf *nc;
3367 	int hg, rule_nr, rr_conflict, tentative;
3368 
3369 	mydisk = device->state.disk;
3370 	if (mydisk == D_NEGOTIATING)
3371 		mydisk = device->new_state_tmp.disk;
3372 
3373 	drbd_info(device, "drbd_sync_handshake:\n");
3374 
3375 	spin_lock_irq(&device->ldev->md.uuid_lock);
3376 	drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3377 	drbd_uuid_dump(device, "peer", device->p_uuid,
3378 		       device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3379 
3380 	hg = drbd_uuid_compare(device, peer_role, &rule_nr);
3381 	spin_unlock_irq(&device->ldev->md.uuid_lock);
3382 
3383 	drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
3384 
3385 	if (hg == -1000) {
3386 		drbd_alert(device, "Unrelated data, aborting!\n");
3387 		return C_MASK;
3388 	}
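	/* Sketch of the two special encodings checked next, derived from
	 * drbd_uuid_compare() above: a return value below -0x10000 packs the
	 * required protocol version into bits 0-7 and the required feature
	 * flags into bits 8-15; a value between -0x10000 and -1000 is
	 * -(1000 + required protocol version), e.g. -1091 -> proto 91. */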
3389 	if (hg < -0x10000) {
3390 		int proto, fflags;
3391 		hg = -hg;
3392 		proto = hg & 0xff;
3393 		fflags = (hg >> 8) & 0xff;
3394 		drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3395 					proto, fflags);
3396 		return C_MASK;
3397 	}
3398 	if (hg < -1000) {
3399 		drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
3400 		return C_MASK;
3401 	}
3402 
3403 	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3404 	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
3405 		int f = (hg == -100) || abs(hg) == 2;
3406 		hg = mydisk > D_INCONSISTENT ? 1 : -1;
3407 		if (f)
3408 			hg = hg*2;
3409 		drbd_info(device, "Becoming sync %s due to disk states.\n",
3410 		     hg > 0 ? "source" : "target");
3411 	}
3412 
3413 	if (abs(hg) == 100)
3414 		drbd_khelper(device, "initial-split-brain");
3415 
3416 	rcu_read_lock();
3417 	nc = rcu_dereference(peer_device->connection->net_conf);
3418 
3419 	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
3420 		int pcount = (device->state.role == R_PRIMARY)
3421 			   + (peer_role == R_PRIMARY);
3422 		int forced = (hg == -100);
3423 
3424 		switch (pcount) {
3425 		case 0:
3426 			hg = drbd_asb_recover_0p(peer_device);
3427 			break;
3428 		case 1:
3429 			hg = drbd_asb_recover_1p(peer_device);
3430 			break;
3431 		case 2:
3432 			hg = drbd_asb_recover_2p(peer_device);
3433 			break;
3434 		}
3435 		if (abs(hg) < 100) {
3436 			drbd_warn(device, "Split-Brain detected, %d primaries, "
3437 			     "automatically solved. Sync from %s node\n",
3438 			     pcount, (hg < 0) ? "peer" : "this");
3439 			if (forced) {
3440 				drbd_warn(device, "Doing a full sync, since"
3441 				     " UUIDs were ambiguous.\n");
3442 				hg = hg*2;
3443 			}
3444 		}
3445 	}
3446 
3447 	if (hg == -100) {
3448 		if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3449 			hg = -1;
3450 		if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3451 			hg = 1;
3452 
3453 		if (abs(hg) < 100)
3454 			drbd_warn(device, "Split-Brain detected, manually solved. "
3455 			     "Sync from %s node\n",
3456 			     (hg < 0) ? "peer" : "this");
3457 	}
3458 	rr_conflict = nc->rr_conflict;
3459 	tentative = nc->tentative;
3460 	rcu_read_unlock();
3461 
3462 	if (hg == -100) {
3463 		/* FIXME this log message is not correct if we end up here
3464 		 * after an attempted attach on a diskless node.
3465 		 * We just refuse to attach -- well, we drop the "connection"
3466 		 * to that disk, in a way... */
3467 		drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3468 		drbd_khelper(device, "split-brain");
3469 		return C_MASK;
3470 	}
3471 
3472 	if (hg > 0 && mydisk <= D_INCONSISTENT) {
3473 		drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3474 		return C_MASK;
3475 	}
3476 
3477 	if (hg < 0 && /* by intention we do not use mydisk here. */
3478 	    device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3479 		switch (rr_conflict) {
3480 		case ASB_CALL_HELPER:
3481 			drbd_khelper(device, "pri-lost");
3482 			/* fall through */
3483 		case ASB_DISCONNECT:
3484 			drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3485 			return C_MASK;
3486 		case ASB_VIOLENTLY:
3487 			drbd_warn(device, "Becoming SyncTarget, violating the stable-data "
3488 			     "assumption\n");
3489 		}
3490 	}
3491 
3492 	if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
3493 		if (hg == 0)
3494 			drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3495 		else
3496 			drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.\n",
3497 				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3498 				 abs(hg) >= 2 ? "full" : "bit-map based");
3499 		return C_MASK;
3500 	}
3501 
3502 	if (abs(hg) >= 2) {
3503 		drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3504 		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3505 					BM_LOCKED_SET_ALLOWED))
3506 			return C_MASK;
3507 	}
3508 
3509 	if (hg > 0) { /* become sync source. */
3510 		rv = C_WF_BITMAP_S;
3511 	} else if (hg < 0) { /* become sync target */
3512 		rv = C_WF_BITMAP_T;
3513 	} else {
3514 		rv = C_CONNECTED;
3515 		if (drbd_bm_total_weight(device)) {
3516 			drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3517 			     drbd_bm_total_weight(device));
3518 		}
3519 	}
3520 
3521 	return rv;
3522 }
3523 
3524 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3525 {
3526 	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3527 	if (peer == ASB_DISCARD_REMOTE)
3528 		return ASB_DISCARD_LOCAL;
3529 
3530 	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3531 	if (peer == ASB_DISCARD_LOCAL)
3532 		return ASB_DISCARD_REMOTE;
3533 
3534 	/* everything else is valid if they are equal on both sides. */
3535 	return peer;
3536 }
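/* Example of the mapping above (illustrative): a peer configured with
 * ASB_DISCARD_REMOTE corresponds to ASB_DISCARD_LOCAL on our side, so both
 * nodes agree on whose data gets discarded; receive_protocol() below relies
 * on this when it compares the after-split-brain policies. */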
3537 
3538 static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
3539 {
3540 	struct p_protocol *p = pi->data;
3541 	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3542 	int p_proto, p_discard_my_data, p_two_primaries, cf;
3543 	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3544 	char integrity_alg[SHARED_SECRET_MAX] = "";
3545 	struct crypto_ahash *peer_integrity_tfm = NULL;
3546 	void *int_dig_in = NULL, *int_dig_vv = NULL;
3547 
3548 	p_proto		= be32_to_cpu(p->protocol);
3549 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
3550 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
3551 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
3552 	p_two_primaries = be32_to_cpu(p->two_primaries);
3553 	cf		= be32_to_cpu(p->conn_flags);
3554 	p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3555 
3556 	if (connection->agreed_pro_version >= 87) {
3557 		int err;
3558 
3559 		if (pi->size > sizeof(integrity_alg))
3560 			return -EIO;
3561 		err = drbd_recv_all(connection, integrity_alg, pi->size);
3562 		if (err)
3563 			return err;
3564 		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3565 	}
3566 
3567 	if (pi->cmd != P_PROTOCOL_UPDATE) {
3568 		clear_bit(CONN_DRY_RUN, &connection->flags);
3569 
3570 		if (cf & CF_DRY_RUN)
3571 			set_bit(CONN_DRY_RUN, &connection->flags);
3572 
3573 		rcu_read_lock();
3574 		nc = rcu_dereference(connection->net_conf);
3575 
3576 		if (p_proto != nc->wire_protocol) {
3577 			drbd_err(connection, "incompatible %s settings\n", "protocol");
3578 			goto disconnect_rcu_unlock;
3579 		}
3580 
3581 		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3582 			drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
3583 			goto disconnect_rcu_unlock;
3584 		}
3585 
3586 		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3587 			drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
3588 			goto disconnect_rcu_unlock;
3589 		}
3590 
3591 		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3592 			drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
3593 			goto disconnect_rcu_unlock;
3594 		}
3595 
3596 		if (p_discard_my_data && nc->discard_my_data) {
3597 			drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
3598 			goto disconnect_rcu_unlock;
3599 		}
3600 
3601 		if (p_two_primaries != nc->two_primaries) {
3602 			drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
3603 			goto disconnect_rcu_unlock;
3604 		}
3605 
3606 		if (strcmp(integrity_alg, nc->integrity_alg)) {
3607 			drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
3608 			goto disconnect_rcu_unlock;
3609 		}
3610 
3611 		rcu_read_unlock();
3612 	}
3613 
3614 	if (integrity_alg[0]) {
3615 		int hash_size;
3616 
3617 		/*
3618 		 * We can only change the peer data integrity algorithm
3619 		 * here.  Changing our own data integrity algorithm
3620 		 * requires that we send a P_PROTOCOL_UPDATE packet at
3621 		 * the same time; otherwise, the peer has no way to
3622 		 * tell between which packets the algorithm should
3623 		 * change.
3624 		 */
3625 
3626 		peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3627 		if (IS_ERR(peer_integrity_tfm)) {
3628 			peer_integrity_tfm = NULL;
3629 			drbd_err(connection, "peer data-integrity-alg %s not supported\n",
3630 				 integrity_alg);
3631 			goto disconnect;
3632 		}
3633 
3634 		hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
3635 		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3636 		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3637 		if (!(int_dig_in && int_dig_vv)) {
3638 			drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
3639 			goto disconnect;
3640 		}
3641 	}
3642 
3643 	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3644 	if (!new_net_conf) {
3645 		drbd_err(connection, "Allocation of new net_conf failed\n");
3646 		goto disconnect;
3647 	}
3648 
3649 	mutex_lock(&connection->data.mutex);
3650 	mutex_lock(&connection->resource->conf_update);
3651 	old_net_conf = connection->net_conf;
3652 	*new_net_conf = *old_net_conf;
3653 
3654 	new_net_conf->wire_protocol = p_proto;
3655 	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3656 	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3657 	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3658 	new_net_conf->two_primaries = p_two_primaries;
3659 
3660 	rcu_assign_pointer(connection->net_conf, new_net_conf);
3661 	mutex_unlock(&connection->resource->conf_update);
3662 	mutex_unlock(&connection->data.mutex);
3663 
3664 	crypto_free_ahash(connection->peer_integrity_tfm);
3665 	kfree(connection->int_dig_in);
3666 	kfree(connection->int_dig_vv);
3667 	connection->peer_integrity_tfm = peer_integrity_tfm;
3668 	connection->int_dig_in = int_dig_in;
3669 	connection->int_dig_vv = int_dig_vv;
3670 
3671 	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3672 		drbd_info(connection, "peer data-integrity-alg: %s\n",
3673 			  integrity_alg[0] ? integrity_alg : "(none)");
3674 
3675 	synchronize_rcu();
3676 	kfree(old_net_conf);
3677 	return 0;
3678 
3679 disconnect_rcu_unlock:
3680 	rcu_read_unlock();
3681 disconnect:
3682 	crypto_free_ahash(peer_integrity_tfm);
3683 	kfree(int_dig_in);
3684 	kfree(int_dig_vv);
3685 	conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
3686 	return -EIO;
3687 }
3688 
3689 /* helper function
3690  * input: alg name, feature name
3691  * return: NULL (alg name was "")
3692  *         ERR_PTR(error) if something goes wrong
3693  *         or the crypto hash ptr, if it worked out ok. */
3694 static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
3695 		const char *alg, const char *name)
3696 {
3697 	struct crypto_ahash *tfm;
3698 
3699 	if (!alg[0])
3700 		return NULL;
3701 
3702 	tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
3703 	if (IS_ERR(tfm)) {
3704 		drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3705 			alg, name, PTR_ERR(tfm));
3706 		return tfm;
3707 	}
3708 	return tfm;
3709 }
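/* Typical caller pattern (sketch, cf. receive_SyncParam() below):
 *
 *	tfm = drbd_crypto_alloc_digest_safe(device, p->verify_alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;	// allocation failed, already logged
 *	// tfm == NULL simply means "no algorithm configured"
 */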
3710 
3711 static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
3712 {
3713 	void *buffer = connection->data.rbuf;
3714 	int size = pi->size;
3715 
3716 	while (size) {
3717 		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3718 		s = drbd_recv(connection, buffer, s);
3719 		if (s <= 0) {
3720 			if (s < 0)
3721 				return s;
3722 			break;
3723 		}
3724 		size -= s;
3725 	}
3726 	if (size)
3727 		return -EIO;
3728 	return 0;
3729 }
3730 
3731 /*
3732  * config_unknown_volume  -  device configuration command for unknown volume
3733  *
3734  * When a device is added to an existing connection, the node on which the
3735  * device is added first will send configuration commands to its peer but the
3736  * peer will not know about the device yet.  It will warn and ignore these
3737  * commands.  Once the device is added on the second node, the second node will
3738  * send the same device configuration commands, but in the other direction.
3739  *
3740  * (We can also end up here if drbd is misconfigured.)
3741  */
3742 static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
3743 {
3744 	drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
3745 		  cmdname(pi->cmd), pi->vnr);
3746 	return ignore_remaining_packet(connection, pi);
3747 }
3748 
3749 static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
3750 {
3751 	struct drbd_peer_device *peer_device;
3752 	struct drbd_device *device;
3753 	struct p_rs_param_95 *p;
3754 	unsigned int header_size, data_size, exp_max_sz;
3755 	struct crypto_ahash *verify_tfm = NULL;
3756 	struct crypto_ahash *csums_tfm = NULL;
3757 	struct net_conf *old_net_conf, *new_net_conf = NULL;
3758 	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3759 	const int apv = connection->agreed_pro_version;
3760 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3761 	int fifo_size = 0;
3762 	int err;
3763 
3764 	peer_device = conn_peer_device(connection, pi->vnr);
3765 	if (!peer_device)
3766 		return config_unknown_volume(connection, pi);
3767 	device = peer_device->device;
3768 
3769 	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3770 		    : apv == 88 ? sizeof(struct p_rs_param)
3771 					+ SHARED_SECRET_MAX
3772 		    : apv <= 94 ? sizeof(struct p_rs_param_89)
3773 		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);
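	/* Illustrative summary of the ternary chain above:
	 *   apv <= 87: sizeof(p_rs_param)                      resync rate only
	 *   apv == 88: sizeof(p_rs_param) + SHARED_SECRET_MAX  verify-alg appended
	 *   apv 89-94: sizeof(p_rs_param_89)                   verify-/csums-alg inline
	 *   apv >= 95: sizeof(p_rs_param_95)                   adds c_plan_ahead etc. */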
3774 
3775 	if (pi->size > exp_max_sz) {
3776 		drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3777 		    pi->size, exp_max_sz);
3778 		return -EIO;
3779 	}
3780 
3781 	if (apv <= 88) {
3782 		header_size = sizeof(struct p_rs_param);
3783 		data_size = pi->size - header_size;
3784 	} else if (apv <= 94) {
3785 		header_size = sizeof(struct p_rs_param_89);
3786 		data_size = pi->size - header_size;
3787 		D_ASSERT(device, data_size == 0);
3788 	} else {
3789 		header_size = sizeof(struct p_rs_param_95);
3790 		data_size = pi->size - header_size;
3791 		D_ASSERT(device, data_size == 0);
3792 	}
3793 
3794 	/* initialize verify_alg and csums_alg */
3795 	p = pi->data;
3796 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3797 
3798 	err = drbd_recv_all(peer_device->connection, p, header_size);
3799 	if (err)
3800 		return err;
3801 
3802 	mutex_lock(&connection->resource->conf_update);
3803 	old_net_conf = peer_device->connection->net_conf;
3804 	if (get_ldev(device)) {
3805 		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3806 		if (!new_disk_conf) {
3807 			put_ldev(device);
3808 			mutex_unlock(&connection->resource->conf_update);
3809 			drbd_err(device, "Allocation of new disk_conf failed\n");
3810 			return -ENOMEM;
3811 		}
3812 
3813 		old_disk_conf = device->ldev->disk_conf;
3814 		*new_disk_conf = *old_disk_conf;
3815 
3816 		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3817 	}
3818 
3819 	if (apv >= 88) {
3820 		if (apv == 88) {
3821 			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3822 				drbd_err(device, "verify-alg of wrong size, "
3823 					"peer wants %u, accepting only up to %u bytes\n",
3824 					data_size, SHARED_SECRET_MAX);
3825 				err = -EIO;
3826 				goto reconnect;
3827 			}
3828 
3829 			err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
3830 			if (err)
3831 				goto reconnect;
3832 			/* we expect NUL terminated string */
3833 			/* but just in case someone tries to be evil */
3834 			D_ASSERT(device, p->verify_alg[data_size-1] == 0);
3835 			p->verify_alg[data_size-1] = 0;
3836 
3837 		} else /* apv >= 89 */ {
3838 			/* we still expect NUL terminated strings */
3839 			/* but just in case someone tries to be evil */
3840 			D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3841 			D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3842 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3843 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3844 		}
3845 
3846 		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3847 			if (device->state.conn == C_WF_REPORT_PARAMS) {
3848 				drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3849 				    old_net_conf->verify_alg, p->verify_alg);
3850 				goto disconnect;
3851 			}
3852 			verify_tfm = drbd_crypto_alloc_digest_safe(device,
3853 					p->verify_alg, "verify-alg");
3854 			if (IS_ERR(verify_tfm)) {
3855 				verify_tfm = NULL;
3856 				goto disconnect;
3857 			}
3858 		}
3859 
3860 		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3861 			if (device->state.conn == C_WF_REPORT_PARAMS) {
3862 				drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3863 				    old_net_conf->csums_alg, p->csums_alg);
3864 				goto disconnect;
3865 			}
3866 			csums_tfm = drbd_crypto_alloc_digest_safe(device,
3867 					p->csums_alg, "csums-alg");
3868 			if (IS_ERR(csums_tfm)) {
3869 				csums_tfm = NULL;
3870 				goto disconnect;
3871 			}
3872 		}
3873 
3874 		if (apv > 94 && new_disk_conf) {
3875 			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3876 			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3877 			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3878 			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3879 
3880 			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3881 			if (fifo_size != device->rs_plan_s->size) {
3882 				new_plan = fifo_alloc(fifo_size);
3883 				if (!new_plan) {
3884 					drbd_err(device, "kmalloc of fifo_buffer failed\n");
3885 					put_ldev(device);
3886 					goto disconnect;
3887 				}
3888 			}
3889 		}
3890 
3891 		if (verify_tfm || csums_tfm) {
3892 			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3893 			if (!new_net_conf) {
3894 				drbd_err(device, "Allocation of new net_conf failed\n");
3895 				goto disconnect;
3896 			}
3897 
3898 			*new_net_conf = *old_net_conf;
3899 
3900 			if (verify_tfm) {
3901 				strcpy(new_net_conf->verify_alg, p->verify_alg);
3902 				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3903 				crypto_free_ahash(peer_device->connection->verify_tfm);
3904 				peer_device->connection->verify_tfm = verify_tfm;
3905 				drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
3906 			}
3907 			if (csums_tfm) {
3908 				strcpy(new_net_conf->csums_alg, p->csums_alg);
3909 				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3910 				crypto_free_ahash(peer_device->connection->csums_tfm);
3911 				peer_device->connection->csums_tfm = csums_tfm;
3912 				drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
3913 			}
3914 			rcu_assign_pointer(connection->net_conf, new_net_conf);
3915 		}
3916 	}
3917 
3918 	if (new_disk_conf) {
3919 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3920 		put_ldev(device);
3921 	}
3922 
3923 	if (new_plan) {
3924 		old_plan = device->rs_plan_s;
3925 		rcu_assign_pointer(device->rs_plan_s, new_plan);
3926 	}
3927 
3928 	mutex_unlock(&connection->resource->conf_update);
3929 	synchronize_rcu();
3930 	if (new_net_conf)
3931 		kfree(old_net_conf);
3932 	kfree(old_disk_conf);
3933 	kfree(old_plan);
3934 
3935 	return 0;
3936 
3937 reconnect:
3938 	if (new_disk_conf) {
3939 		put_ldev(device);
3940 		kfree(new_disk_conf);
3941 	}
3942 	mutex_unlock(&connection->resource->conf_update);
3943 	return -EIO;
3944 
3945 disconnect:
3946 	kfree(new_plan);
3947 	if (new_disk_conf) {
3948 		put_ldev(device);
3949 		kfree(new_disk_conf);
3950 	}
3951 	mutex_unlock(&connection->resource->conf_update);
3952 	/* just for completeness: actually not needed,
3953 	 * as this is not reached if csums_tfm was ok. */
3954 	crypto_free_ahash(csums_tfm);
3955 	/* but free the verify_tfm again, if csums_tfm did not work out */
3956 	crypto_free_ahash(verify_tfm);
3957 	conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3958 	return -EIO;
3959 }
3960 
3961 /* warn if the arguments differ by more than 12.5% */
3962 static void warn_if_differ_considerably(struct drbd_device *device,
3963 	const char *s, sector_t a, sector_t b)
3964 {
3965 	sector_t d;
3966 	if (a == 0 || b == 0)
3967 		return;
3968 	d = (a > b) ? (a - b) : (b - a);
3969 	if (d > (a>>3) || d > (b>>3))
3970 		drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
3971 		     (unsigned long long)a, (unsigned long long)b);
3972 }
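/* Worked example for the check above (hypothetical numbers): a = 1000,
 * b = 880 gives d = 120; since 120 > (880 >> 3) == 110, the sizes differ
 * by more than 12.5% of the smaller one and the warning is emitted. */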
3973 
3974 static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
3975 {
3976 	struct drbd_peer_device *peer_device;
3977 	struct drbd_device *device;
3978 	struct p_sizes *p = pi->data;
3979 	struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
3980 	enum determine_dev_size dd = DS_UNCHANGED;
3981 	sector_t p_size, p_usize, p_csize, my_usize;
3982 	int ldsc = 0; /* local disk size changed */
3983 	enum dds_flags ddsf;
3984 
3985 	peer_device = conn_peer_device(connection, pi->vnr);
3986 	if (!peer_device)
3987 		return config_unknown_volume(connection, pi);
3988 	device = peer_device->device;
3989 
3990 	p_size = be64_to_cpu(p->d_size);
3991 	p_usize = be64_to_cpu(p->u_size);
3992 	p_csize = be64_to_cpu(p->c_size);
3993 
3994 	/* just store the peer's disk size for now.
3995 	 * we still need to figure out whether we accept that. */
3996 	device->p_size = p_size;
3997 
3998 	if (get_ldev(device)) {
3999 		sector_t new_size, cur_size;
4000 		rcu_read_lock();
4001 		my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
4002 		rcu_read_unlock();
4003 
4004 		warn_if_differ_considerably(device, "lower level device sizes",
4005 			   p_size, drbd_get_max_capacity(device->ldev));
4006 		warn_if_differ_considerably(device, "user requested size",
4007 					    p_usize, my_usize);
4008 
4009 		/* if this is the first connect, or an otherwise expected
4010 		 * param exchange, choose the minimum */
4011 		if (device->state.conn == C_WF_REPORT_PARAMS)
4012 			p_usize = min_not_zero(my_usize, p_usize);
4013 
4014 		/* Never shrink a device with usable data during connect.
4015 		   But allow online shrinking if we are connected. */
4016 		new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
4017 		cur_size = drbd_get_capacity(device->this_bdev);
4018 		if (new_size < cur_size &&
4019 		    device->state.disk >= D_OUTDATED &&
4020 		    device->state.conn < C_CONNECTED) {
4021 			drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4022 					(unsigned long long)new_size, (unsigned long long)cur_size);
4023 			conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4024 			put_ldev(device);
4025 			return -EIO;
4026 		}
4027 
4028 		if (my_usize != p_usize) {
4029 			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4030 
4031 			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4032 			if (!new_disk_conf) {
4033 				drbd_err(device, "Allocation of new disk_conf failed\n");
4034 				put_ldev(device);
4035 				return -ENOMEM;
4036 			}
4037 
4038 			mutex_lock(&connection->resource->conf_update);
4039 			old_disk_conf = device->ldev->disk_conf;
4040 			*new_disk_conf = *old_disk_conf;
4041 			new_disk_conf->disk_size = p_usize;
4042 
4043 			rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4044 			mutex_unlock(&connection->resource->conf_update);
4045 			synchronize_rcu();
4046 			kfree(old_disk_conf);
4047 
4048 			drbd_info(device, "Peer sets u_size to %lu sectors\n",
4049 				 (unsigned long)p_usize);
4050 		}
4051 
4052 		put_ldev(device);
4053 	}
4054 
4055 	device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
4056 	/* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
4057 	   In case we cleared the QUEUE_FLAG_DISCARD from our queue in
4058 	   drbd_reconsider_queue_parameters(), we can be sure that after
4059 	   drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4060 
4061 	ddsf = be16_to_cpu(p->dds_flags);
4062 	if (get_ldev(device)) {
4063 		drbd_reconsider_queue_parameters(device, device->ldev, o);
4064 		dd = drbd_determine_dev_size(device, ddsf, NULL);
4065 		put_ldev(device);
4066 		if (dd == DS_ERROR)
4067 			return -EIO;
4068 		drbd_md_sync(device);
4069 	} else {
4070 		/*
4071 		 * I am diskless, need to accept the peer's *current* size.
4072 		 * I must NOT accept the peers backing disk size,
4073 		 * it may have been larger than mine all along...
4074 		 *
4075 		 * At this point, the peer knows more about my disk, or at
4076 		 * least about what we last agreed upon, than myself.
4077 		 * So if his c_size is less than his d_size, the most likely
4078 		 * reason is that *my* d_size was smaller last time we checked.
4079 		 *
4080 		 * However, if he sends a zero current size,
4081 		 * take his (user-capped or) backing disk size anyways.
4082 		 */
4083 		drbd_reconsider_queue_parameters(device, NULL, o);
4084 		drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
4085 	}
4086 
4087 	if (get_ldev(device)) {
4088 		if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4089 			device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
4090 			ldsc = 1;
4091 		}
4092 
4093 		put_ldev(device);
4094 	}
4095 
4096 	if (device->state.conn > C_WF_REPORT_PARAMS) {
4097 		if (be64_to_cpu(p->c_size) !=
4098 		    drbd_get_capacity(device->this_bdev) || ldsc) {
4099 			/* we have different sizes, probably peer
4100 			 * needs to know my new size... */
4101 			drbd_send_sizes(peer_device, 0, ddsf);
4102 		}
4103 		if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4104 		    (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4105 			if (device->state.pdsk >= D_INCONSISTENT &&
4106 			    device->state.disk >= D_INCONSISTENT) {
4107 				if (ddsf & DDSF_NO_RESYNC)
4108 					drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
4109 				else
4110 					resync_after_online_grow(device);
4111 			} else
4112 				set_bit(RESYNC_AFTER_NEG, &device->flags);
4113 		}
4114 	}
4115 
4116 	return 0;
4117 }
4118 
4119 static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
4120 {
4121 	struct drbd_peer_device *peer_device;
4122 	struct drbd_device *device;
4123 	struct p_uuids *p = pi->data;
4124 	u64 *p_uuid;
4125 	int i, updated_uuids = 0;
4126 
4127 	peer_device = conn_peer_device(connection, pi->vnr);
4128 	if (!peer_device)
4129 		return config_unknown_volume(connection, pi);
4130 	device = peer_device->device;
4131 
4132 	p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
4133 	if (!p_uuid) {
4134 		drbd_err(device, "kmalloc of p_uuid failed\n");
4135 		return -ENOMEM;
4136 	}
4137 
4138 	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4139 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
4140 
4141 	kfree(device->p_uuid);
4142 	device->p_uuid = p_uuid;
4143 
4144 	if (device->state.conn < C_CONNECTED &&
4145 	    device->state.disk < D_INCONSISTENT &&
4146 	    device->state.role == R_PRIMARY &&
4147 	    (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
4148 		drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
4149 		    (unsigned long long)device->ed_uuid);
4150 		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4151 		return -EIO;
4152 	}
4153 
4154 	if (get_ldev(device)) {
4155 		int skip_initial_sync =
4156 			device->state.conn == C_CONNECTED &&
4157 			peer_device->connection->agreed_pro_version >= 90 &&
4158 			device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
4159 			(p_uuid[UI_FLAGS] & 8);
4160 		if (skip_initial_sync) {
4161 			drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
4162 			drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4163 					"clear_n_write from receive_uuids",
4164 					BM_LOCKED_TEST_ALLOWED);
4165 			_drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4166 			_drbd_uuid_set(device, UI_BITMAP, 0);
4167 			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4168 					CS_VERBOSE, NULL);
4169 			drbd_md_sync(device);
4170 			updated_uuids = 1;
4171 		}
4172 		put_ldev(device);
4173 	} else if (device->state.disk < D_INCONSISTENT &&
4174 		   device->state.role == R_PRIMARY) {
4175 		/* I am a diskless primary, the peer just created a new current UUID
4176 		   for me. */
4177 		updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4178 	}
4179 
4180 	/* Before we test for the disk state, we should wait until any possibly
4181 	   ongoing cluster-wide state change has finished. That is important if
4182 	   we are primary and are detaching from our disk. We need to see the
4183 	   new disk state... */
4184 	mutex_lock(device->state_mutex);
4185 	mutex_unlock(device->state_mutex);
4186 	if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4187 		updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4188 
4189 	if (updated_uuids)
4190 		drbd_print_uuids(device, "receiver updated UUIDs to");
4191 
4192 	return 0;
4193 }
4194 
4195 /**
4196  * convert_state() - Converts the peer's view of the cluster state to our point of view
4197  * @ps:		The state as seen by the peer.
4198  */
4199 static union drbd_state convert_state(union drbd_state ps)
4200 {
4201 	union drbd_state ms;
4202 
4203 	static enum drbd_conns c_tab[] = {
4204 		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
4205 		[C_CONNECTED] = C_CONNECTED,
4206 
4207 		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4208 		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4209 		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4210 		[C_VERIFY_S]       = C_VERIFY_T,
4211 		[C_MASK]   = C_MASK,
4212 	};
4213 
4214 	ms.i = ps.i;
4215 
4216 	ms.conn = c_tab[ps.conn];
4217 	ms.peer = ps.role;
4218 	ms.role = ps.peer;
4219 	ms.pdsk = ps.disk;
4220 	ms.disk = ps.pdsk;
4221 	ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4222 
4223 	return ms;
4224 }
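/* Illustrative example (not in the original source): if the peer reports
 * { role=Primary, peer=Secondary, disk=UpToDate, pdsk=Inconsistent }, the
 * converted state is { role=Secondary, peer=Primary, disk=Inconsistent,
 * pdsk=UpToDate } -- the same situation, seen from our end of the link. */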
4225 
4226 static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
4227 {
4228 	struct drbd_peer_device *peer_device;
4229 	struct drbd_device *device;
4230 	struct p_req_state *p = pi->data;
4231 	union drbd_state mask, val;
4232 	enum drbd_state_rv rv;
4233 
4234 	peer_device = conn_peer_device(connection, pi->vnr);
4235 	if (!peer_device)
4236 		return -EIO;
4237 	device = peer_device->device;
4238 
4239 	mask.i = be32_to_cpu(p->mask);
4240 	val.i = be32_to_cpu(p->val);
4241 
4242 	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
4243 	    mutex_is_locked(device->state_mutex)) {
4244 		drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
4245 		return 0;
4246 	}
4247 
4248 	mask = convert_state(mask);
4249 	val = convert_state(val);
4250 
4251 	rv = drbd_change_state(device, CS_VERBOSE, mask, val);
4252 	drbd_send_sr_reply(peer_device, rv);
4253 
4254 	drbd_md_sync(device);
4255 
4256 	return 0;
4257 }
4258 
4259 static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
4260 {
4261 	struct p_req_state *p = pi->data;
4262 	union drbd_state mask, val;
4263 	enum drbd_state_rv rv;
4264 
4265 	mask.i = be32_to_cpu(p->mask);
4266 	val.i = be32_to_cpu(p->val);
4267 
4268 	if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4269 	    mutex_is_locked(&connection->cstate_mutex)) {
4270 		conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
4271 		return 0;
4272 	}
4273 
4274 	mask = convert_state(mask);
4275 	val = convert_state(val);
4276 
4277 	rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4278 	conn_send_sr_reply(connection, rv);
4279 
4280 	return 0;
4281 }
4282 
4283 static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
4284 {
4285 	struct drbd_peer_device *peer_device;
4286 	struct drbd_device *device;
4287 	struct p_state *p = pi->data;
4288 	union drbd_state os, ns, peer_state;
4289 	enum drbd_disk_state real_peer_disk;
4290 	enum chg_state_flags cs_flags;
4291 	int rv;
4292 
4293 	peer_device = conn_peer_device(connection, pi->vnr);
4294 	if (!peer_device)
4295 		return config_unknown_volume(connection, pi);
4296 	device = peer_device->device;
4297 
4298 	peer_state.i = be32_to_cpu(p->state);
4299 
4300 	real_peer_disk = peer_state.disk;
4301 	if (peer_state.disk == D_NEGOTIATING) {
4302 		real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
4303 		drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
4304 	}
4305 
4306 	spin_lock_irq(&device->resource->req_lock);
4307  retry:
4308 	os = ns = drbd_read_state(device);
4309 	spin_unlock_irq(&device->resource->req_lock);
4310 
4311 	/* If some other part of the code (ack_receiver thread, timeout)
4312 	 * already decided to close the connection again,
4313 	 * we must not "re-establish" it here. */
4314 	if (os.conn <= C_TEAR_DOWN)
4315 		return -ECONNRESET;
4316 
4317 	/* If this is the "end of sync" confirmation, usually the peer disk
4318 	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4319 	 * set) resync started in PausedSyncT, or if the timing of pause-/
4320 	 * unpause-sync events has been "just right", the peer disk may
4321 	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4322 	 */
4323 	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4324 	    real_peer_disk == D_UP_TO_DATE &&
4325 	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4326 		/* If we are (becoming) SyncSource, but peer is still in sync
4327 		 * preparation, ignore its uptodate-ness to avoid flapping, it
4328 		 * will change to inconsistent once the peer reaches active
4329 		 * syncing states.
4330 		 * It may have changed syncer-paused flags, however, so we
4331 		 * cannot ignore this completely. */
4332 		if (peer_state.conn > C_CONNECTED &&
4333 		    peer_state.conn < C_SYNC_SOURCE)
4334 			real_peer_disk = D_INCONSISTENT;
4335 
4336 		/* if peer_state changes to connected at the same time,
4337 		 * it explicitly notifies us that it finished resync.
4338 		 * Maybe we should finish it up, too? */
4339 		else if (os.conn >= C_SYNC_SOURCE &&
4340 			 peer_state.conn == C_CONNECTED) {
4341 			if (drbd_bm_total_weight(device) <= device->rs_failed)
4342 				drbd_resync_finished(device);
4343 			return 0;
4344 		}
4345 	}
4346 
4347 	/* explicit verify finished notification, stop sector reached. */
4348 	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4349 	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
4350 		ov_out_of_sync_print(device);
4351 		drbd_resync_finished(device);
4352 		return 0;
4353 	}
4354 
4355 	/* peer says his disk is inconsistent, while we think it is uptodate,
4356 	 * and this happens while the peer still thinks we have a sync going on,
4357 	 * but we think we are already done with the sync.
4358 	 * We ignore this to avoid flapping pdsk.
4359 	 * This should not happen, if the peer is a recent version of drbd. */
4360 	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4361 	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4362 		real_peer_disk = D_UP_TO_DATE;
4363 
4364 	if (ns.conn == C_WF_REPORT_PARAMS)
4365 		ns.conn = C_CONNECTED;
4366 
4367 	if (peer_state.conn == C_AHEAD)
4368 		ns.conn = C_BEHIND;
4369 
4370 	if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4371 	    get_ldev_if_state(device, D_NEGOTIATING)) {
4372 		int cr; /* consider resync */
4373 
4374 		/* if we established a new connection */
4375 		cr  = (os.conn < C_CONNECTED);
4376 		/* if we had an established connection
4377 		 * and one of the nodes newly attaches a disk */
4378 		cr |= (os.conn == C_CONNECTED &&
4379 		       (peer_state.disk == D_NEGOTIATING ||
4380 			os.disk == D_NEGOTIATING));
4381 		/* if we have both been inconsistent, and the peer has been
4382 		 * forced to be UpToDate with --overwrite-data */
4383 		cr |= test_bit(CONSIDER_RESYNC, &device->flags);
4384 		/* if we had been plain connected, and the admin requested to
4385 		 * start a sync by "invalidate" or "invalidate-remote" */
4386 		cr |= (os.conn == C_CONNECTED &&
4387 				(peer_state.conn >= C_STARTING_SYNC_S &&
4388 				 peer_state.conn <= C_WF_BITMAP_T));
4389 
4390 		if (cr)
4391 			ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
4392 
4393 		put_ldev(device);
4394 		if (ns.conn == C_MASK) {
4395 			ns.conn = C_CONNECTED;
4396 			if (device->state.disk == D_NEGOTIATING) {
4397 				drbd_force_state(device, NS(disk, D_FAILED));
4398 			} else if (peer_state.disk == D_NEGOTIATING) {
4399 				drbd_err(device, "Disk attach process on the peer node was aborted.\n");
4400 				peer_state.disk = D_DISKLESS;
4401 				real_peer_disk = D_DISKLESS;
4402 			} else {
4403 				if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
4404 					return -EIO;
4405 				D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
4406 				conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4407 				return -EIO;
4408 			}
4409 		}
4410 	}
4411 
4412 	spin_lock_irq(&device->resource->req_lock);
4413 	if (os.i != drbd_read_state(device).i)
4414 		goto retry;
4415 	clear_bit(CONSIDER_RESYNC, &device->flags);
4416 	ns.peer = peer_state.role;
4417 	ns.pdsk = real_peer_disk;
4418 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4419 	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
4420 		ns.disk = device->new_state_tmp.disk;
4421 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
4422 	if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4423 	    test_bit(NEW_CUR_UUID, &device->flags)) {
4424 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
4425 		   for temporary network outages! */
4426 		spin_unlock_irq(&device->resource->req_lock);
4427 		drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
4428 		tl_clear(peer_device->connection);
4429 		drbd_uuid_new_current(device);
4430 		clear_bit(NEW_CUR_UUID, &device->flags);
4431 		conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
4432 		return -EIO;
4433 	}
4434 	rv = _drbd_set_state(device, ns, cs_flags, NULL);
4435 	ns = drbd_read_state(device);
4436 	spin_unlock_irq(&device->resource->req_lock);
4437 
4438 	if (rv < SS_SUCCESS) {
4439 		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4440 		return -EIO;
4441 	}
4442 
4443 	if (os.conn > C_WF_REPORT_PARAMS) {
4444 		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
4445 		    peer_state.disk != D_NEGOTIATING) {
4446 			/* we want resync, peer has not yet decided to sync... */
4447 			/* Nowadays only used when forcing a node into primary role and
4448 			   setting its disk to UpToDate with that */
4449 			drbd_send_uuids(peer_device);
4450 			drbd_send_current_state(peer_device);
4451 		}
4452 	}
4453 
4454 	clear_bit(DISCARD_MY_DATA, &device->flags);
4455 
4456 	drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
4457 
4458 	return 0;
4459 }
4460 
4461 static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
4462 {
4463 	struct drbd_peer_device *peer_device;
4464 	struct drbd_device *device;
4465 	struct p_rs_uuid *p = pi->data;
4466 
4467 	peer_device = conn_peer_device(connection, pi->vnr);
4468 	if (!peer_device)
4469 		return -EIO;
4470 	device = peer_device->device;
4471 
4472 	wait_event(device->misc_wait,
4473 		   device->state.conn == C_WF_SYNC_UUID ||
4474 		   device->state.conn == C_BEHIND ||
4475 		   device->state.conn < C_CONNECTED ||
4476 		   device->state.disk < D_NEGOTIATING);
4477 
4478 	/* D_ASSERT(device,  device->state.conn == C_WF_SYNC_UUID ); */
4479 
4480 	/* Here the _drbd_uuid_ functions are right, current should
4481 	   _not_ be rotated into the history */
4482 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
4483 		_drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4484 		_drbd_uuid_set(device, UI_BITMAP, 0UL);
4485 
4486 		drbd_print_uuids(device, "updated sync uuid");
4487 		drbd_start_resync(device, C_SYNC_TARGET);
4488 
4489 		put_ldev(device);
4490 	} else
4491 		drbd_err(device, "Ignoring SyncUUID packet!\n");
4492 
4493 	return 0;
4494 }
4495 
4496 /**
4497  * receive_bitmap_plain
4498  *
4499  * Return 0 when done, 1 when another iteration is needed, and a negative error
4500  * code upon failure.
4501  */
4502 static int
4503 receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
4504 		     unsigned long *p, struct bm_xfer_ctx *c)
4505 {
4506 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4507 				 drbd_header_size(peer_device->connection);
4508 	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4509 				       c->bm_words - c->word_offset);
4510 	unsigned int want = num_words * sizeof(*p);
4511 	int err;
4512 
4513 	if (want != size) {
4514 		drbd_err(peer_device, "%s: want (%u) != size (%u)\n", __func__, want, size);
4515 		return -EIO;
4516 	}
4517 	if (want == 0)
4518 		return 0;
4519 	err = drbd_recv_all(peer_device->connection, p, want);
4520 	if (err)
4521 		return err;
4522 
4523 	drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
4524 
4525 	c->word_offset += num_words;
4526 	c->bit_offset = c->word_offset * BITS_PER_LONG;
4527 	if (c->bit_offset > c->bm_bits)
4528 		c->bit_offset = c->bm_bits;
4529 
4530 	return 1;
4531 }
4532 
4533 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4534 {
4535 	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4536 }
4537 
4538 static int dcbp_get_start(struct p_compressed_bm *p)
4539 {
4540 	return (p->encoding & 0x80) != 0;
4541 }
4542 
4543 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4544 {
4545 	return (p->encoding >> 4) & 0x7;
4546 }
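/* Layout of p_compressed_bm.encoding, as implied by the helpers above:
 *
 *   bit  7    start toggle: whether the first RLE run describes set bits
 *   bits 6-4  number of padding bits at the end of the bit stream
 *   bits 3-0  enum drbd_bitmap_code, e.g. RLE_VLI_Bits
 */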
4547 
4548 /**
4549  * recv_bm_rle_bits
4550  *
4551  * Return 0 when done, 1 when another iteration is needed, and a negative error
4552  * code upon failure.
4553  */
4554 static int
4555 recv_bm_rle_bits(struct drbd_peer_device *peer_device,
4556 		struct p_compressed_bm *p,
4557 		 struct bm_xfer_ctx *c,
4558 		 unsigned int len)
4559 {
4560 	struct bitstream bs;
4561 	u64 look_ahead;
4562 	u64 rl;
4563 	u64 tmp;
4564 	unsigned long s = c->bit_offset;
4565 	unsigned long e;
4566 	int toggle = dcbp_get_start(p);
4567 	int have;
4568 	int bits;
4569 
4570 	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4571 
4572 	bits = bitstream_get_bits(&bs, &look_ahead, 64);
4573 	if (bits < 0)
4574 		return -EIO;
4575 
4576 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
4577 		bits = vli_decode_bits(&rl, look_ahead);
4578 		if (bits <= 0)
4579 			return -EIO;
4580 
4581 		if (toggle) {
4582 			e = s + rl - 1;
4583 			if (e >= c->bm_bits) {
4584 				drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4585 				return -EIO;
4586 			}
4587 			_drbd_bm_set_bits(peer_device->device, s, e);
4588 		}
4589 
4590 		if (have < bits) {
4591 			drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4592 				have, bits, look_ahead,
4593 				(unsigned int)(bs.cur.b - p->code),
4594 				(unsigned int)bs.buf_len);
4595 			return -EIO;
4596 		}
4597 		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4598 		if (likely(bits < 64))
4599 			look_ahead >>= bits;
4600 		else
4601 			look_ahead = 0;
4602 		have -= bits;
4603 
4604 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4605 		if (bits < 0)
4606 			return -EIO;
4607 		look_ahead |= tmp << have;
4608 		have += bits;
4609 	}
4610 
4611 	c->bit_offset = s;
4612 	bm_xfer_ctx_bit_to_word_offset(c);
4613 
4614 	return (s != c->bm_bits);
4615 }
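/* Illustrative decoding example (not in the original source): with
 * dcbp_get_start(p) == 0 and decoded run lengths 5, 3, 7, the loop above
 * skips 5 clear bits, sets the next 3 bits via _drbd_bm_set_bits(), then
 * skips 7 clear bits -- the toggle flips after every run. */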
4616 
4617 /**
4618  * decode_bitmap_c
4619  *
4620  * Return 0 when done, 1 when another iteration is needed, and a negative error
4621  * code upon failure.
4622  */
4623 static int
4624 decode_bitmap_c(struct drbd_peer_device *peer_device,
4625 		struct p_compressed_bm *p,
4626 		struct bm_xfer_ctx *c,
4627 		unsigned int len)
4628 {
4629 	if (dcbp_get_code(p) == RLE_VLI_Bits)
4630 		return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
4631 
4632 	/* other variants had been implemented for evaluation,
4633 	 * but have been dropped as this one turned out to be "best"
4634 	 * during all our tests. */
4635 
4636 	drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4637 	conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4638 	return -EIO;
4639 }
4640 
4641 void INFO_bm_xfer_stats(struct drbd_device *device,
4642 		const char *direction, struct bm_xfer_ctx *c)
4643 {
4644 	/* what would it take to transfer it "plaintext" */
4645 	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
4646 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4647 	unsigned int plain =
4648 		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4649 		c->bm_words * sizeof(unsigned long);
4650 	unsigned int total = c->bytes[0] + c->bytes[1];
4651 	unsigned int r;
4652 
4653 	/* total can not be zero, but just in case: */
4654 	if (total == 0)
4655 		return;
4656 
4657 	/* don't report if not compressed */
4658 	if (total >= plain)
4659 		return;
4660 
4661 	/* total < plain. check for overflow, still */
4662 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4663 		                    : (1000 * total / plain);
4664 
4665 	if (r > 1000)
4666 		r = 1000;
4667 
4668 	r = 1000 - r;
4669 	drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4670 	     "total %u; compression: %u.%u%%\n",
4671 			direction,
4672 			c->bytes[1], c->packets[1],
4673 			c->bytes[0], c->packets[0],
4674 			total, r/10, r % 10);
4675 }
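/* Worked example for the percentage math above (hypothetical numbers):
 * plain = 100000 bytes, total = 12345 bytes on the wire;
 * r = 1000 * 12345 / 100000 = 123, and 1000 - 123 = 877 is printed as
 * "compression: 87.7%". */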
4676 
4677 /* Since we are processing the bitfield from lower addresses to higher,
4678    it does not matter whether we process it in 32 bit chunks or in 64 bit
4679    chunks, as long as it is little endian. (Understand it as a byte stream,
4680    beginning with the lowest byte...) If we used big endian,
4681    we would need to process it from the highest address to the lowest,
4682    in order to be agnostic to the 32 vs 64 bits issue.
4683 
4684    Returns 0 on failure, 1 if we successfully received it. */
4685 static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
4686 {
4687 	struct drbd_peer_device *peer_device;
4688 	struct drbd_device *device;
4689 	struct bm_xfer_ctx c;
4690 	int err;
4691 
4692 	peer_device = conn_peer_device(connection, pi->vnr);
4693 	if (!peer_device)
4694 		return -EIO;
4695 	device = peer_device->device;
4696 
4697 	drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4698 	/* you are supposed to send additional out-of-sync information
4699 	 * if you actually set bits during this phase */
4700 
4701 	c = (struct bm_xfer_ctx) {
4702 		.bm_bits = drbd_bm_bits(device),
4703 		.bm_words = drbd_bm_words(device),
4704 	};
4705 
4706 	for(;;) {
4707 		if (pi->cmd == P_BITMAP)
4708 			err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
4709 		else if (pi->cmd == P_COMPRESSED_BITMAP) {
4710 			/* MAYBE: sanity check that we speak proto >= 90,
4711 			 * and the feature is enabled! */
4712 			struct p_compressed_bm *p = pi->data;
4713 
4714 			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
4715 				drbd_err(device, "ReportCBitmap packet too large\n");
4716 				err = -EIO;
4717 				goto out;
4718 			}
4719 			if (pi->size <= sizeof(*p)) {
4720 				drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4721 				err = -EIO;
4722 				goto out;
4723 			}
4724 			err = drbd_recv_all(peer_device->connection, p, pi->size);
4725 			if (err)
4726 				goto out;
4727 			err = decode_bitmap_c(peer_device, p, &c, pi->size);
4728 		} else {
4729 			drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4730 			err = -EIO;
4731 			goto out;
4732 		}
4733 
4734 		c.packets[pi->cmd == P_BITMAP]++;
4735 		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
4736 
4737 		if (err <= 0) {
4738 			if (err < 0)
4739 				goto out;
4740 			break;
4741 		}
4742 		err = drbd_recv_header(peer_device->connection, pi);
4743 		if (err)
4744 			goto out;
4745 	}
4746 
4747 	INFO_bm_xfer_stats(device, "receive", &c);
4748 
4749 	if (device->state.conn == C_WF_BITMAP_T) {
4750 		enum drbd_state_rv rv;
4751 
4752 		err = drbd_send_bitmap(device);
4753 		if (err)
4754 			goto out;
4755 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4756 		rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4757 		D_ASSERT(device, rv == SS_SUCCESS);
4758 	} else if (device->state.conn != C_WF_BITMAP_S) {
4759 		/* admin may have requested C_DISCONNECTING,
4760 		 * other threads may have noticed network errors */
4761 		drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
4762 		    drbd_conn_str(device->state.conn));
4763 	}
4764 	err = 0;
4765 
4766  out:
4767 	drbd_bm_unlock(device);
4768 	if (!err && device->state.conn == C_WF_BITMAP_S)
4769 		drbd_start_resync(device, C_SYNC_SOURCE);
4770 	return err;
4771 }
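
/*
 * Illustrative sketch of the endianness argument in the comment above
 * receive_bitmap(): scanning a little endian bitmap byte by byte, lowest
 * address first, yields the same bit numbering no matter whether the
 * words on the wire were 32 or 64 bit wide.  first_set_bit_bytewise()
 * is a hypothetical helper, not used by the driver.
 */
static inline unsigned long first_set_bit_bytewise(const u8 *bm, size_t bytes)
{
	size_t i;
	unsigned int b;

	for (i = 0; i < bytes; i++) {
		if (!bm[i])
			continue;
		for (b = 0; b < 8; b++)
			if (bm[i] & (1U << b))
				return i * 8 + b;
	}
	return bytes * 8; /* no bit set */
}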
4772 
4773 static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
4774 {
4775 	drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
4776 		 pi->cmd, pi->size);
4777 
4778 	return ignore_remaining_packet(connection, pi);
4779 }
4780 
4781 static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
4782 {
4783 	/* Make sure we've acked all the TCP data associated
4784 	 * with the data requests being unplugged */
4785 	drbd_tcp_quickack(connection->data.socket);
4786 
4787 	return 0;
4788 }
4789 
4790 static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
4791 {
4792 	struct drbd_peer_device *peer_device;
4793 	struct drbd_device *device;
4794 	struct p_block_desc *p = pi->data;
4795 
4796 	peer_device = conn_peer_device(connection, pi->vnr);
4797 	if (!peer_device)
4798 		return -EIO;
4799 	device = peer_device->device;
4800 
4801 	switch (device->state.conn) {
4802 	case C_WF_SYNC_UUID:
4803 	case C_WF_BITMAP_T:
4804 	case C_BEHIND:
4805 		break;
4806 	default:
4807 		drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4808 				drbd_conn_str(device->state.conn));
4809 	}
4810 
4811 	drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4812 
4813 	return 0;
4814 }
4815 
4816 static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4817 {
4818 	struct drbd_peer_device *peer_device;
4819 	struct p_block_desc *p = pi->data;
4820 	struct drbd_device *device;
4821 	sector_t sector;
4822 	int size, err = 0;
4823 
4824 	peer_device = conn_peer_device(connection, pi->vnr);
4825 	if (!peer_device)
4826 		return -EIO;
4827 	device = peer_device->device;
4828 
4829 	sector = be64_to_cpu(p->sector);
4830 	size = be32_to_cpu(p->blksize);
4831 
4832 	dec_rs_pending(device);
4833 
4834 	if (get_ldev(device)) {
4835 		struct drbd_peer_request *peer_req;
4836 		const int op = REQ_OP_WRITE_ZEROES;
4837 
4838 		peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
4839 					       size, 0, GFP_NOIO);
4840 		if (!peer_req) {
4841 			put_ldev(device);
4842 			return -ENOMEM;
4843 		}
4844 
4845 		peer_req->w.cb = e_end_resync_block;
4846 		peer_req->submit_jif = jiffies;
4847 		peer_req->flags |= EE_IS_TRIM;
4848 
4849 		spin_lock_irq(&device->resource->req_lock);
4850 		list_add_tail(&peer_req->w.list, &device->sync_ee);
4851 		spin_unlock_irq(&device->resource->req_lock);
4852 
4853 		atomic_add(pi->size >> 9, &device->rs_sect_ev);
4854 		err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
4855 
4856 		if (err) {
4857 			spin_lock_irq(&device->resource->req_lock);
4858 			list_del(&peer_req->w.list);
4859 			spin_unlock_irq(&device->resource->req_lock);
4860 
4861 			drbd_free_peer_req(device, peer_req);
4862 			put_ldev(device);
4863 			err = 0;
4864 			goto fail;
4865 		}
4866 
4867 		inc_unacked(device);
4868 
4869 		/* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
4870 		   as well as drbd_rs_complete_io() */
4871 	} else {
4872 	fail:
4873 		drbd_rs_complete_io(device, sector);
4874 		drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
4875 	}
4876 
4877 	atomic_add(size >> 9, &device->rs_sect_in);
4878 
4879 	return err;
4880 }
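
/*
 * Illustrative note: the "size >> 9" accounting above converts a byte
 * count into 512 byte sectors.  bytes_to_sectors() is a hypothetical
 * spelled-out equivalent of that idiom.
 */
static inline unsigned int bytes_to_sectors(unsigned int bytes)
{
	return bytes >> 9; /* 2^9 == 512 bytes per sector */
}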
4881 
4882 struct data_cmd {
4883 	int expect_payload;
4884 	unsigned int pkt_size;
4885 	int (*fn)(struct drbd_connection *, struct packet_info *);
4886 };
4887 
4888 static struct data_cmd drbd_cmd_handler[] = {
4889 	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
4890 	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
4891 	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
4892 	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
4893 	[P_BITMAP]	    = { 1, 0, receive_bitmap },
4894 	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
4895 	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
4896 	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
4897 	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4898 	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
4899 	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
4900 	[P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
4901 	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
4902 	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
4903 	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
4904 	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
4905 	[P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4906 	[P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
4907 	[P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
4908 	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4909 	[P_RS_THIN_REQ]     = { 0, sizeof(struct p_block_req), receive_DataRequest },
4910 	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
4911 	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4912 	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4913 	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4914 	[P_TRIM]	    = { 0, sizeof(struct p_trim), receive_Data },
4915 	[P_RS_DEALLOCATED]  = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
4916 	[P_WSAME]	    = { 1, sizeof(struct p_wsame), receive_Data },
4917 };
4918 
4919 static void drbdd(struct drbd_connection *connection)
4920 {
4921 	struct packet_info pi;
4922 	size_t shs; /* sub header size */
4923 	int err;
4924 
4925 	while (get_t_state(&connection->receiver) == RUNNING) {
4926 		struct data_cmd const *cmd;
4927 
4928 		drbd_thread_current_set_cpu(&connection->receiver);
4929 		update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
4930 		if (drbd_recv_header_maybe_unplug(connection, &pi))
4931 			goto err_out;
4932 
4933 		cmd = &drbd_cmd_handler[pi.cmd];
4934 		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4935 			drbd_err(connection, "Unexpected data packet %s (0x%04x)\n",
4936 				 cmdname(pi.cmd), pi.cmd);
4937 			goto err_out;
4938 		}
4939 
4940 		shs = cmd->pkt_size;
4941 		if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
4942 			shs += sizeof(struct o_qlim);
4943 		if (pi.size > shs && !cmd->expect_payload) {
4944 			drbd_err(connection, "No payload expected %s l:%d\n",
4945 				 cmdname(pi.cmd), pi.size);
4946 			goto err_out;
4947 		}
4948 		if (pi.size < shs) {
4949 			drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
4950 				 cmdname(pi.cmd), (int)shs, pi.size);
4951 			goto err_out;
4952 		}
4953 
4954 		if (shs) {
4955 			update_receiver_timing_details(connection, drbd_recv_all_warn);
4956 			err = drbd_recv_all_warn(connection, pi.data, shs);
4957 			if (err)
4958 				goto err_out;
4959 			pi.size -= shs;
4960 		}
4961 
4962 		update_receiver_timing_details(connection, cmd->fn);
4963 		err = cmd->fn(connection, &pi);
4964 		if (err) {
4965 			drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
4966 				 cmdname(pi.cmd), err, pi.size);
4967 			goto err_out;
4968 		}
4969 	}
4970 	return;
4971 
4972     err_out:
4973 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4974 }
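
/*
 * Illustrative sketch of the table driven dispatch in drbdd() above,
 * with the same sanity checks in isolation.  demo_dispatch() and its
 * table parameter are hypothetical; the real table is drbd_cmd_handler[]
 * and the real loop additionally widens shs for P_SIZES with WSAME.
 */
static inline int demo_dispatch(struct drbd_connection *connection,
				const struct data_cmd *tbl, unsigned int tbl_len,
				struct packet_info *pi)
{
	const struct data_cmd *cmd;

	if (pi->cmd >= tbl_len || !tbl[pi->cmd].fn)
		return -EINVAL;	/* unknown or unhandled packet type */
	cmd = &tbl[pi->cmd];
	if (pi->size > cmd->pkt_size && !cmd->expect_payload)
		return -EPROTO;	/* payload where none is expected */
	if (pi->size < cmd->pkt_size)
		return -EPROTO;	/* fixed sub header truncated */
	return cmd->fn(connection, pi);
}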
4975 
4976 static void conn_disconnect(struct drbd_connection *connection)
4977 {
4978 	struct drbd_peer_device *peer_device;
4979 	enum drbd_conns oc;
4980 	int vnr;
4981 
4982 	if (connection->cstate == C_STANDALONE)
4983 		return;
4984 
4985 	/* We are about to start the cleanup after connection loss.
4986 	 * Make sure drbd_make_request knows about that.
4987 	 * Usually we should be in some network failure state already,
4988 	 * but just in case we are not, we fix it up here.
4989 	 */
4990 	conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4991 
4992 	/* ack_receiver does not clean up anything. it must not interfere, either */
4993 	drbd_thread_stop(&connection->ack_receiver);
4994 	if (connection->ack_sender) {
4995 		destroy_workqueue(connection->ack_sender);
4996 		connection->ack_sender = NULL;
4997 	}
4998 	drbd_free_sock(connection);
4999 
5000 	rcu_read_lock();
5001 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5002 		struct drbd_device *device = peer_device->device;
5003 		kref_get(&device->kref);
5004 		rcu_read_unlock();
5005 		drbd_disconnected(peer_device);
5006 		kref_put(&device->kref, drbd_destroy_device);
5007 		rcu_read_lock();
5008 	}
5009 	rcu_read_unlock();
5010 
5011 	if (!list_empty(&connection->current_epoch->list))
5012 		drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
5013 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
5014 	atomic_set(&connection->current_epoch->epoch_size, 0);
5015 	connection->send.seen_any_write_yet = false;
5016 
5017 	drbd_info(connection, "Connection closed\n");
5018 
5019 	if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5020 		conn_try_outdate_peer_async(connection);
5021 
5022 	spin_lock_irq(&connection->resource->req_lock);
5023 	oc = connection->cstate;
5024 	if (oc >= C_UNCONNECTED)
5025 		_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
5026 
5027 	spin_unlock_irq(&connection->resource->req_lock);
5028 
5029 	if (oc == C_DISCONNECTING)
5030 		conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
5031 }
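
/*
 * Illustrative sketch of the iteration pattern in conn_disconnect()
 * above: to call a function that may sleep for each IDR entry, pin the
 * entry with a reference, drop the RCU read lock around the call, then
 * re-take it.  demo_visit() is a hypothetical callback.
 */
static inline void idr_visit_may_sleep(struct idr *peer_devices,
				       void (*demo_visit)(struct drbd_device *))
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);	/* keep device across the unlock */
		rcu_read_unlock();
		demo_visit(device);		/* may sleep */
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}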
5032 
5033 static int drbd_disconnected(struct drbd_peer_device *peer_device)
5034 {
5035 	struct drbd_device *device = peer_device->device;
5036 	unsigned int i;
5037 
5038 	/* wait for current activity to cease. */
5039 	spin_lock_irq(&device->resource->req_lock);
5040 	_drbd_wait_ee_list_empty(device, &device->active_ee);
5041 	_drbd_wait_ee_list_empty(device, &device->sync_ee);
5042 	_drbd_wait_ee_list_empty(device, &device->read_ee);
5043 	spin_unlock_irq(&device->resource->req_lock);
5044 
5045 	/* We do not have data structures that would allow us to
5046 	 * get the rs_pending_cnt down to 0 again.
5047 	 *  * On C_SYNC_TARGET we do not have any data structures describing
5048 	 *    the pending RSDataRequests we have sent.
5049 	 *  * On C_SYNC_SOURCE there is no data structure that tracks
5050 	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5051 	 *  And no, it is not the sum of the reference counts in the
5052 	 *  resync_LRU. The resync_LRU tracks the whole operation including
5053 	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
5054 	 *  on the fly. */
5055 	drbd_rs_cancel_all(device);
5056 	device->rs_total = 0;
5057 	device->rs_failed = 0;
5058 	atomic_set(&device->rs_pending_cnt, 0);
5059 	wake_up(&device->misc_wait);
5060 
5061 	del_timer_sync(&device->resync_timer);
5062 	resync_timer_fn(&device->resync_timer);
5063 
5064 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5065 	 * w_make_resync_request etc. which may still be on the worker queue
5066 	 * to be "canceled" */
5067 	drbd_flush_workqueue(&peer_device->connection->sender_work);
5068 
5069 	drbd_finish_peer_reqs(device);
5070 
5071 	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
5072 	   might have queued work again. The flush before drbd_finish_peer_reqs() is
5073 	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
5074 	drbd_flush_workqueue(&peer_device->connection->sender_work);
5075 
5076 	/* need to do it again, drbd_finish_peer_reqs() may have populated it
5077 	 * again via drbd_try_clear_on_disk_bm(). */
5078 	drbd_rs_cancel_all(device);
5079 
5080 	kfree(device->p_uuid);
5081 	device->p_uuid = NULL;
5082 
5083 	if (!drbd_suspended(device))
5084 		tl_clear(peer_device->connection);
5085 
5086 	drbd_md_sync(device);
5087 
5088 	if (get_ldev(device)) {
5089 		drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5090 				"write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5091 		put_ldev(device);
5092 	}
5093 
5094 	/* tcp_close and release of sendpage pages can be deferred.  I don't
5095 	 * want to use SO_LINGER, because apparently it can be deferred for
5096 	 * more than 20 seconds (longest time I checked).
5097 	 *
5098 	 * Actually we don't care for exactly when the network stack does its
5099 	 * put_page(), but release our reference on these pages right here.
5100 	 */
5101 	i = drbd_free_peer_reqs(device, &device->net_ee);
5102 	if (i)
5103 		drbd_info(device, "net_ee not empty, killed %u entries\n", i);
5104 	i = atomic_read(&device->pp_in_use_by_net);
5105 	if (i)
5106 		drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
5107 	i = atomic_read(&device->pp_in_use);
5108 	if (i)
5109 		drbd_info(device, "pp_in_use = %d, expected 0\n", i);
5110 
5111 	D_ASSERT(device, list_empty(&device->read_ee));
5112 	D_ASSERT(device, list_empty(&device->active_ee));
5113 	D_ASSERT(device, list_empty(&device->sync_ee));
5114 	D_ASSERT(device, list_empty(&device->done_ee));
5115 
5116 	return 0;
5117 }
5118 
5119 /*
5120  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5121  * we can agree on is stored in agreed_pro_version.
5122  *
5123  * feature flags and the reserved array should be enough room for future
5124  * enhancements of the handshake protocol, and possible plugins...
5125  *
5126  * for now, they are expected to be zero, but ignored.
5127  */
5128 static int drbd_send_features(struct drbd_connection *connection)
5129 {
5130 	struct drbd_socket *sock;
5131 	struct p_connection_features *p;
5132 
5133 	sock = &connection->data;
5134 	p = conn_prepare_command(connection, sock);
5135 	if (!p)
5136 		return -EIO;
5137 	memset(p, 0, sizeof(*p));
5138 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5139 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
5140 	p->feature_flags = cpu_to_be32(PRO_FEATURES);
5141 	return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
5142 }
5143 
5144 /*
5145  * return values:
5146  *   1 yes, we have a valid connection
5147  *   0 oops, did not work out, please try again
5148  *  -1 peer talks different language,
5149  *     no point in trying again, please go standalone.
5150  */
5151 static int drbd_do_features(struct drbd_connection *connection)
5152 {
5153 	/* ASSERT current == connection->receiver ... */
5154 	struct p_connection_features *p;
5155 	const int expect = sizeof(struct p_connection_features);
5156 	struct packet_info pi;
5157 	int err;
5158 
5159 	err = drbd_send_features(connection);
5160 	if (err)
5161 		return 0;
5162 
5163 	err = drbd_recv_header(connection, &pi);
5164 	if (err)
5165 		return 0;
5166 
5167 	if (pi.cmd != P_CONNECTION_FEATURES) {
5168 		drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
5169 			 cmdname(pi.cmd), pi.cmd);
5170 		return -1;
5171 	}
5172 
5173 	if (pi.size != expect) {
5174 		drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
5175 		     expect, pi.size);
5176 		return -1;
5177 	}
5178 
5179 	p = pi.data;
5180 	err = drbd_recv_all_warn(connection, p, expect);
5181 	if (err)
5182 		return 0;
5183 
5184 	p->protocol_min = be32_to_cpu(p->protocol_min);
5185 	p->protocol_max = be32_to_cpu(p->protocol_max);
5186 	if (p->protocol_max == 0)
5187 		p->protocol_max = p->protocol_min;
5188 
5189 	if (PRO_VERSION_MAX < p->protocol_min ||
5190 	    PRO_VERSION_MIN > p->protocol_max)
5191 		goto incompat;
5192 
5193 	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
5194 	connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
5195 
5196 	drbd_info(connection, "Handshake successful: "
5197 	     "Agreed network protocol version %d\n", connection->agreed_pro_version);
5198 
5199 	drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n",
5200 		  connection->agreed_features,
5201 		  connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5202 		  connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
5203 		  connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" :
5204 		  connection->agreed_features ? "" : " none");
5205 
5206 	return 1;
5207 
5208  incompat:
5209 	drbd_err(connection, "incompatible DRBD dialects: "
5210 	    "I support %d-%d, peer supports %d-%d\n",
5211 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
5212 	    p->protocol_min, p->protocol_max);
5213 	return -1;
5214 }
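
/*
 * Illustrative sketch of the range negotiation in drbd_do_features()
 * above: two [min, max] version ranges either overlap, in which case
 * both sides agree on the highest common version, or the peers are
 * incompatible.  agree_pro_version() is a hypothetical helper;
 * e.g. agree_pro_version(86, 101, 87, 96) == 96.
 */
static inline int agree_pro_version(int my_min, int my_max,
				    int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;	/* disjoint ranges: go standalone */
	return my_max < peer_max ? my_max : peer_max;
}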
5215 
5216 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
5217 static int drbd_do_auth(struct drbd_connection *connection)
5218 {
5219 	drbd_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
5220 	drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
5221 	return -1;
5222 }
5223 #else
5224 #define CHALLENGE_LEN 64
5225 
5226 /* Return value:
5227 	1 - auth succeeded,
5228 	0 - failed, try again (network error),
5229 	-1 - auth failed, don't try again.
5230 */
5231 
5232 static int drbd_do_auth(struct drbd_connection *connection)
5233 {
5234 	struct drbd_socket *sock;
5235 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
5236 	char *response = NULL;
5237 	char *right_response = NULL;
5238 	char *peers_ch = NULL;
5239 	unsigned int key_len;
5240 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
5241 	unsigned int resp_size;
5242 	SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
5243 	struct packet_info pi;
5244 	struct net_conf *nc;
5245 	int err, rv;
5246 
5247 	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */
5248 
5249 	rcu_read_lock();
5250 	nc = rcu_dereference(connection->net_conf);
5251 	key_len = strlen(nc->shared_secret);
5252 	memcpy(secret, nc->shared_secret, key_len);
5253 	rcu_read_unlock();
5254 
5255 	desc->tfm = connection->cram_hmac_tfm;
5256 	desc->flags = 0;
5257 
5258 	rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
5259 	if (rv) {
5260 		drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
5261 		rv = -1;
5262 		goto fail;
5263 	}
5264 
5265 	get_random_bytes(my_challenge, CHALLENGE_LEN);
5266 
5267 	sock = &connection->data;
5268 	if (!conn_prepare_command(connection, sock)) {
5269 		rv = 0;
5270 		goto fail;
5271 	}
5272 	rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
5273 				my_challenge, CHALLENGE_LEN);
5274 	if (!rv)
5275 		goto fail;
5276 
5277 	err = drbd_recv_header(connection, &pi);
5278 	if (err) {
5279 		rv = 0;
5280 		goto fail;
5281 	}
5282 
5283 	if (pi.cmd != P_AUTH_CHALLENGE) {
5284 		drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
5285 			 cmdname(pi.cmd), pi.cmd);
5286 		rv = 0;
5287 		goto fail;
5288 	}
5289 
5290 	if (pi.size > CHALLENGE_LEN * 2) {
5291 		drbd_err(connection, "AuthChallenge payload too big.\n");
5292 		rv = -1;
5293 		goto fail;
5294 	}
5295 
5296 	if (pi.size < CHALLENGE_LEN) {
5297 		drbd_err(connection, "AuthChallenge payload too small.\n");
5298 		rv = -1;
5299 		goto fail;
5300 	}
5301 
5302 	peers_ch = kmalloc(pi.size, GFP_NOIO);
5303 	if (peers_ch == NULL) {
5304 		drbd_err(connection, "kmalloc of peers_ch failed\n");
5305 		rv = -1;
5306 		goto fail;
5307 	}
5308 
5309 	err = drbd_recv_all_warn(connection, peers_ch, pi.size);
5310 	if (err) {
5311 		rv = 0;
5312 		goto fail;
5313 	}
5314 
5315 	if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5316 		drbd_err(connection, "Peer presented the same challenge!\n");
5317 		rv = -1;
5318 		goto fail;
5319 	}
5320 
5321 	resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
5322 	response = kmalloc(resp_size, GFP_NOIO);
5323 	if (response == NULL) {
5324 		drbd_err(connection, "kmalloc of response failed\n");
5325 		rv = -1;
5326 		goto fail;
5327 	}
5328 
5329 	rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
5330 	if (rv) {
5331 		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
5332 		rv = -1;
5333 		goto fail;
5334 	}
5335 
5336 	if (!conn_prepare_command(connection, sock)) {
5337 		rv = 0;
5338 		goto fail;
5339 	}
5340 	rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
5341 				response, resp_size);
5342 	if (!rv)
5343 		goto fail;
5344 
5345 	err = drbd_recv_header(connection, &pi);
5346 	if (err) {
5347 		rv = 0;
5348 		goto fail;
5349 	}
5350 
5351 	if (pi.cmd != P_AUTH_RESPONSE) {
5352 		drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
5353 			 cmdname(pi.cmd), pi.cmd);
5354 		rv = 0;
5355 		goto fail;
5356 	}
5357 
5358 	if (pi.size != resp_size) {
5359 		drbd_err(connection, "AuthResponse payload of wrong size\n");
5360 		rv = 0;
5361 		goto fail;
5362 	}
5363 
5364 	err = drbd_recv_all_warn(connection, response, resp_size);
5365 	if (err) {
5366 		rv = 0;
5367 		goto fail;
5368 	}
5369 
5370 	right_response = kmalloc(resp_size, GFP_NOIO);
5371 	if (right_response == NULL) {
5372 		drbd_err(connection, "kmalloc of right_response failed\n");
5373 		rv = -1;
5374 		goto fail;
5375 	}
5376 
5377 	rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5378 				 right_response);
5379 	if (rv) {
5380 		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
5381 		rv = -1;
5382 		goto fail;
5383 	}
5384 
5385 	rv = !memcmp(response, right_response, resp_size);
5386 
5387 	if (rv)
5388 		drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
5389 		     resp_size);
5390 	else
5391 		rv = -1;
5392 
5393  fail:
5394 	kfree(peers_ch);
5395 	kfree(response);
5396 	kfree(right_response);
5397 	shash_desc_zero(desc);
5398 
5399 	return rv;
5400 }
5401 #endif
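
/*
 * Illustrative sketch of the verification step of the CRAM-HMAC
 * exchange in drbd_do_auth() above: each side answers the peer's
 * challenge with HMAC(secret, challenge); the receiver recomputes the
 * digest over its own challenge and compares.  demo_hmac() is a
 * hypothetical stand-in for crypto_shash_digest() with the shared
 * secret already set as key.
 */
static inline bool cram_hmac_verify(int (*demo_hmac)(const u8 *msg,
						     unsigned int len, u8 *out),
				    const u8 *my_challenge, unsigned int chall_len,
				    const u8 *peer_response, u8 *scratch,
				    unsigned int resp_size)
{
	/* what the peer must have sent if it knows the secret */
	if (demo_hmac(my_challenge, chall_len, scratch))
		return false;
	/* a constant time comparison would be preferable to memcmp() */
	return memcmp(peer_response, scratch, resp_size) == 0;
}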
5402 
5403 int drbd_receiver(struct drbd_thread *thi)
5404 {
5405 	struct drbd_connection *connection = thi->connection;
5406 	int h;
5407 
5408 	drbd_info(connection, "receiver (re)started\n");
5409 
5410 	do {
5411 		h = conn_connect(connection);
5412 		if (h == 0) {
5413 			conn_disconnect(connection);
5414 			schedule_timeout_interruptible(HZ);
5415 		}
5416 		if (h == -1) {
5417 			drbd_warn(connection, "Discarding network configuration.\n");
5418 			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
5419 		}
5420 	} while (h == 0);
5421 
5422 	if (h > 0) {
5423 		blk_start_plug(&connection->receiver_plug);
5424 		drbdd(connection);
5425 		blk_finish_plug(&connection->receiver_plug);
5426 	}
5427 
5428 	conn_disconnect(connection);
5429 
5430 	drbd_info(connection, "receiver terminated\n");
5431 	return 0;
5432 }
5433 
5434 /* ********* acknowledge sender ******** */
5435 
5436 static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
5437 {
5438 	struct p_req_state_reply *p = pi->data;
5439 	int retcode = be32_to_cpu(p->retcode);
5440 
5441 	if (retcode >= SS_SUCCESS) {
5442 		set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
5443 	} else {
5444 		set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
5445 		drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
5446 			 drbd_set_st_err_str(retcode), retcode);
5447 	}
5448 	wake_up(&connection->ping_wait);
5449 
5450 	return 0;
5451 }
5452 
5453 static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
5454 {
5455 	struct drbd_peer_device *peer_device;
5456 	struct drbd_device *device;
5457 	struct p_req_state_reply *p = pi->data;
5458 	int retcode = be32_to_cpu(p->retcode);
5459 
5460 	peer_device = conn_peer_device(connection, pi->vnr);
5461 	if (!peer_device)
5462 		return -EIO;
5463 	device = peer_device->device;
5464 
5465 	if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
5466 		D_ASSERT(device, connection->agreed_pro_version < 100);
5467 		return got_conn_RqSReply(connection, pi);
5468 	}
5469 
5470 	if (retcode >= SS_SUCCESS) {
5471 		set_bit(CL_ST_CHG_SUCCESS, &device->flags);
5472 	} else {
5473 		set_bit(CL_ST_CHG_FAIL, &device->flags);
5474 		drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
5475 			drbd_set_st_err_str(retcode), retcode);
5476 	}
5477 	wake_up(&device->state_wait);
5478 
5479 	return 0;
5480 }
5481 
5482 static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
5483 {
5484 	return drbd_send_ping_ack(connection);
5485 
5486 }
5487 
5488 static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
5489 {
5490 	/* restore idle timeout */
5491 	connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
5492 	if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5493 		wake_up(&connection->ping_wait);
5494 
5495 	return 0;
5496 }
5497 
5498 static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
5499 {
5500 	struct drbd_peer_device *peer_device;
5501 	struct drbd_device *device;
5502 	struct p_block_ack *p = pi->data;
5503 	sector_t sector = be64_to_cpu(p->sector);
5504 	int blksize = be32_to_cpu(p->blksize);
5505 
5506 	peer_device = conn_peer_device(connection, pi->vnr);
5507 	if (!peer_device)
5508 		return -EIO;
5509 	device = peer_device->device;
5510 
5511 	D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
5512 
5513 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5514 
5515 	if (get_ldev(device)) {
5516 		drbd_rs_complete_io(device, sector);
5517 		drbd_set_in_sync(device, sector, blksize);
5518 		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
5519 		device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5520 		put_ldev(device);
5521 	}
5522 	dec_rs_pending(device);
5523 	atomic_add(blksize >> 9, &device->rs_sect_in);
5524 
5525 	return 0;
5526 }
5527 
5528 static int
5529 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
5530 			      struct rb_root *root, const char *func,
5531 			      enum drbd_req_event what, bool missing_ok)
5532 {
5533 	struct drbd_request *req;
5534 	struct bio_and_error m;
5535 
5536 	spin_lock_irq(&device->resource->req_lock);
5537 	req = find_request(device, root, id, sector, missing_ok, func);
5538 	if (unlikely(!req)) {
5539 		spin_unlock_irq(&device->resource->req_lock);
5540 		return -EIO;
5541 	}
5542 	__req_mod(req, what, &m);
5543 	spin_unlock_irq(&device->resource->req_lock);
5544 
5545 	if (m.bio)
5546 		complete_master_bio(device, &m);
5547 	return 0;
5548 }
5549 
5550 static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
5551 {
5552 	struct drbd_peer_device *peer_device;
5553 	struct drbd_device *device;
5554 	struct p_block_ack *p = pi->data;
5555 	sector_t sector = be64_to_cpu(p->sector);
5556 	int blksize = be32_to_cpu(p->blksize);
5557 	enum drbd_req_event what;
5558 
5559 	peer_device = conn_peer_device(connection, pi->vnr);
5560 	if (!peer_device)
5561 		return -EIO;
5562 	device = peer_device->device;
5563 
5564 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5565 
5566 	if (p->block_id == ID_SYNCER) {
5567 		drbd_set_in_sync(device, sector, blksize);
5568 		dec_rs_pending(device);
5569 		return 0;
5570 	}
5571 	switch (pi->cmd) {
5572 	case P_RS_WRITE_ACK:
5573 		what = WRITE_ACKED_BY_PEER_AND_SIS;
5574 		break;
5575 	case P_WRITE_ACK:
5576 		what = WRITE_ACKED_BY_PEER;
5577 		break;
5578 	case P_RECV_ACK:
5579 		what = RECV_ACKED_BY_PEER;
5580 		break;
5581 	case P_SUPERSEDED:
5582 		what = CONFLICT_RESOLVED;
5583 		break;
5584 	case P_RETRY_WRITE:
5585 		what = POSTPONE_WRITE;
5586 		break;
5587 	default:
5588 		BUG();
5589 	}
5590 
5591 	return validate_req_change_req_state(device, p->block_id, sector,
5592 					     &device->write_requests, __func__,
5593 					     what, false);
5594 }
5595 
5596 static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
5597 {
5598 	struct drbd_peer_device *peer_device;
5599 	struct drbd_device *device;
5600 	struct p_block_ack *p = pi->data;
5601 	sector_t sector = be64_to_cpu(p->sector);
5602 	int size = be32_to_cpu(p->blksize);
5603 	int err;
5604 
5605 	peer_device = conn_peer_device(connection, pi->vnr);
5606 	if (!peer_device)
5607 		return -EIO;
5608 	device = peer_device->device;
5609 
5610 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5611 
5612 	if (p->block_id == ID_SYNCER) {
5613 		dec_rs_pending(device);
5614 		drbd_rs_failed_io(device, sector, size);
5615 		return 0;
5616 	}
5617 
5618 	err = validate_req_change_req_state(device, p->block_id, sector,
5619 					    &device->write_requests, __func__,
5620 					    NEG_ACKED, true);
5621 	if (err) {
5622 		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5623 		   The master bio might already be completed, therefore the
5624 		   request is no longer in the collision hash. */
5625 		/* In Protocol B we might already have got a P_RECV_ACK
5626 		   but then get a P_NEG_ACK afterwards. */
5627 		drbd_set_out_of_sync(device, sector, size);
5628 	}
5629 	return 0;
5630 }
5631 
5632 static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
5633 {
5634 	struct drbd_peer_device *peer_device;
5635 	struct drbd_device *device;
5636 	struct p_block_ack *p = pi->data;
5637 	sector_t sector = be64_to_cpu(p->sector);
5638 
5639 	peer_device = conn_peer_device(connection, pi->vnr);
5640 	if (!peer_device)
5641 		return -EIO;
5642 	device = peer_device->device;
5643 
5644 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5645 
5646 	drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
5647 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
5648 
5649 	return validate_req_change_req_state(device, p->block_id, sector,
5650 					     &device->read_requests, __func__,
5651 					     NEG_ACKED, false);
5652 }
5653 
5654 static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
5655 {
5656 	struct drbd_peer_device *peer_device;
5657 	struct drbd_device *device;
5658 	sector_t sector;
5659 	int size;
5660 	struct p_block_ack *p = pi->data;
5661 
5662 	peer_device = conn_peer_device(connection, pi->vnr);
5663 	if (!peer_device)
5664 		return -EIO;
5665 	device = peer_device->device;
5666 
5667 	sector = be64_to_cpu(p->sector);
5668 	size = be32_to_cpu(p->blksize);
5669 
5670 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5671 
5672 	dec_rs_pending(device);
5673 
5674 	if (get_ldev_if_state(device, D_FAILED)) {
5675 		drbd_rs_complete_io(device, sector);
5676 		switch (pi->cmd) {
5677 		case P_NEG_RS_DREPLY:
5678 			drbd_rs_failed_io(device, sector, size);	/* fall through */
5679 		case P_RS_CANCEL:
5680 			break;
5681 		default:
5682 			BUG();
5683 		}
5684 		put_ldev(device);
5685 	}
5686 
5687 	return 0;
5688 }
5689 
5690 static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
5691 {
5692 	struct p_barrier_ack *p = pi->data;
5693 	struct drbd_peer_device *peer_device;
5694 	int vnr;
5695 
5696 	tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
5697 
5698 	rcu_read_lock();
5699 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5700 		struct drbd_device *device = peer_device->device;
5701 
5702 		if (device->state.conn == C_AHEAD &&
5703 		    atomic_read(&device->ap_in_flight) == 0 &&
5704 		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5705 			device->start_resync_timer.expires = jiffies + HZ;
5706 			add_timer(&device->start_resync_timer);
5707 		}
5708 	}
5709 	rcu_read_unlock();
5710 
5711 	return 0;
5712 }
5713 
5714 static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
5715 {
5716 	struct drbd_peer_device *peer_device;
5717 	struct drbd_device *device;
5718 	struct p_block_ack *p = pi->data;
5719 	struct drbd_device_work *dw;
5720 	sector_t sector;
5721 	int size;
5722 
5723 	peer_device = conn_peer_device(connection, pi->vnr);
5724 	if (!peer_device)
5725 		return -EIO;
5726 	device = peer_device->device;
5727 
5728 	sector = be64_to_cpu(p->sector);
5729 	size = be32_to_cpu(p->blksize);
5730 
5731 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5732 
5733 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5734 		drbd_ov_out_of_sync_found(device, sector, size);
5735 	else
5736 		ov_out_of_sync_print(device);
5737 
5738 	if (!get_ldev(device))
5739 		return 0;
5740 
5741 	drbd_rs_complete_io(device, sector);
5742 	dec_rs_pending(device);
5743 
5744 	--device->ov_left;
5745 
5746 	/* let's advance progress step marks only for every other megabyte */
5747 	if ((device->ov_left & 0x200) == 0x200)
5748 		drbd_advance_rs_marks(device, device->ov_left);
5749 
5750 	if (device->ov_left == 0) {
5751 		dw = kmalloc(sizeof(*dw), GFP_NOIO);
5752 		if (dw) {
5753 			dw->w.cb = w_ov_finished;
5754 			dw->device = device;
5755 			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
5756 		} else {
5757 			drbd_err(device, "kmalloc(dw) failed.");
5758 			ov_out_of_sync_print(device);
5759 			drbd_resync_finished(device);
5760 		}
5761 	}
5762 	put_ldev(device);
5763 	return 0;
5764 }
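
/*
 * Illustrative sketch of the "every other megabyte" test above: with
 * 4 KiB verify units, bit 9 of the remaining block counter toggles
 * every 512 blocks, i.e. every 2 MiB, so testing it rate limits the
 * progress marks without a division.  every_other_megabyte() is a
 * hypothetical helper.
 */
static inline bool every_other_megabyte(unsigned long blocks_left)
{
	return (blocks_left & 0x200) == 0x200; /* 0x200 * 4 KiB == 2 MiB */
}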
5765 
5766 static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
5767 {
5768 	return 0;
5769 }
5770 
5771 struct meta_sock_cmd {
5772 	size_t pkt_size;
5773 	int (*fn)(struct drbd_connection *connection, struct packet_info *);
5774 };
5775 
5776 static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
5777 {
5778 	long t;
5779 	struct net_conf *nc;
5780 
5781 	rcu_read_lock();
5782 	nc = rcu_dereference(connection->net_conf);
5783 	t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5784 	rcu_read_unlock();
5785 
5786 	t *= HZ;
5787 	if (ping_timeout)
5788 		t /= 10;
5789 
5790 	connection->meta.socket->sk->sk_rcvtimeo = t;
5791 }
5792 
5793 static void set_ping_timeout(struct drbd_connection *connection)
5794 {
5795 	set_rcvtimeo(connection, 1);
5796 }
5797 
5798 static void set_idle_timeout(struct drbd_connection *connection)
5799 {
5800 	set_rcvtimeo(connection, 0);
5801 }
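
/*
 * Illustrative sketch of the unit handling in set_rcvtimeo() above:
 * ping_int is configured in seconds, ping_timeo in tenths of a second,
 * and sk_rcvtimeo expects jiffies.  rcvtimeo_jiffies() is a
 * hypothetical helper.
 */
static inline long rcvtimeo_jiffies(long value, bool value_is_tenths)
{
	long t = value * HZ;	/* seconds -> jiffies */

	if (value_is_tenths)
		t /= 10;	/* ping_timeo counts 0.1 second units */
	return t;
}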
5802 
5803 static struct meta_sock_cmd ack_receiver_tbl[] = {
5804 	[P_PING]	    = { 0, got_Ping },
5805 	[P_PING_ACK]	    = { 0, got_PingAck },
5806 	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5807 	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5808 	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5809 	[P_SUPERSEDED]	    = { sizeof(struct p_block_ack), got_BlockAck },
5810 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
5811 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
5812 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5813 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
5814 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
5815 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5816 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5817 	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5818 	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5819 	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5820 	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
5821 };
5822 
5823 int drbd_ack_receiver(struct drbd_thread *thi)
5824 {
5825 	struct drbd_connection *connection = thi->connection;
5826 	struct meta_sock_cmd *cmd = NULL;
5827 	struct packet_info pi;
5828 	unsigned long pre_recv_jif;
5829 	int rv;
5830 	void *buf    = connection->meta.rbuf;
5831 	int received = 0;
5832 	unsigned int header_size = drbd_header_size(connection);
5833 	int expect   = header_size;
5834 	bool ping_timeout_active = false;
5835 	struct sched_param param = { .sched_priority = 2 };
5836 
5837 	rv = sched_setscheduler(current, SCHED_RR, &param);
5838 	if (rv < 0)
5839 		drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv);
5840 
5841 	while (get_t_state(thi) == RUNNING) {
5842 		drbd_thread_current_set_cpu(thi);
5843 
5844 		conn_reclaim_net_peer_reqs(connection);
5845 
5846 		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5847 			if (drbd_send_ping(connection)) {
5848 				drbd_err(connection, "drbd_send_ping has failed\n");
5849 				goto reconnect;
5850 			}
5851 			set_ping_timeout(connection);
5852 			ping_timeout_active = true;
5853 		}
5854 
5855 		pre_recv_jif = jiffies;
5856 		rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
5857 
5858 		/* Note:
5859 		 * -EINTR	 (on meta) we got a signal
5860 		 * -EAGAIN	 (on meta) rcvtimeo expired
5861 		 * -ECONNRESET	 other side closed the connection
5862 		 * -ERESTARTSYS  (on data) we got a signal
5863 		 * rv <  0	 other than above: unexpected error!
5864 		 * rv == expected: full header or command
5865 		 * rv <  expected: "woken" by signal during receive
5866 		 * rv == 0	 : "connection shut down by peer"
5867 		 */
5868 		if (likely(rv > 0)) {
5869 			received += rv;
5870 			buf	 += rv;
5871 		} else if (rv == 0) {
5872 			if (test_bit(DISCONNECT_SENT, &connection->flags)) {
5873 				long t;
5874 				rcu_read_lock();
5875 				t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
5876 				rcu_read_unlock();
5877 
5878 				t = wait_event_timeout(connection->ping_wait,
5879 						       connection->cstate < C_WF_REPORT_PARAMS,
5880 						       t);
5881 				if (t)
5882 					break;
5883 			}
5884 			drbd_err(connection, "meta connection shut down by peer.\n");
5885 			goto reconnect;
5886 		} else if (rv == -EAGAIN) {
5887 			/* If the data socket received something meanwhile,
5888 			 * that is good enough: peer is still alive. */
5889 			if (time_after(connection->last_received, pre_recv_jif))
5890 				continue;
5891 			if (ping_timeout_active) {
5892 				drbd_err(connection, "PingAck did not arrive in time.\n");
5893 				goto reconnect;
5894 			}
5895 			set_bit(SEND_PING, &connection->flags);
5896 			continue;
5897 		} else if (rv == -EINTR) {
5898 			/* maybe drbd_thread_stop(): the while condition will notice.
5899 			 * maybe woken for send_ping: we'll send a ping above,
5900 			 * and change the rcvtimeo */
5901 			flush_signals(current);
5902 			continue;
5903 		} else {
5904 			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
5905 			goto reconnect;
5906 		}
5907 
5908 		if (received == expect && cmd == NULL) {
5909 			if (decode_header(connection, connection->meta.rbuf, &pi))
5910 				goto reconnect;
5911 			cmd = &ack_receiver_tbl[pi.cmd];
5912 			if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
5913 				drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
5914 					 cmdname(pi.cmd), pi.cmd);
5915 				goto disconnect;
5916 			}
5917 			expect = header_size + cmd->pkt_size;
5918 			if (pi.size != expect - header_size) {
5919 				drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
5920 					pi.cmd, pi.size);
5921 				goto reconnect;
5922 			}
5923 		}
5924 		if (received == expect) {
5925 			bool err;
5926 
5927 			err = cmd->fn(connection, &pi);
5928 			if (err) {
5929 				drbd_err(connection, "%pf failed\n", cmd->fn);
5930 				goto reconnect;
5931 			}
5932 
5933 			connection->last_received = jiffies;
5934 
5935 			if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
5936 				set_idle_timeout(connection);
5937 				ping_timeout_active = false;
5938 			}
5939 
5940 			buf	 = connection->meta.rbuf;
5941 			received = 0;
5942 			expect	 = header_size;
5943 			cmd	 = NULL;
5944 		}
5945 	}
5946 
5947 	if (0) {
5948 reconnect:
5949 		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5950 		conn_md_sync(connection);
5951 	}
5952 	if (0) {
5953 disconnect:
5954 		conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
5955 	}
5956 
5957 	drbd_info(connection, "ack_receiver terminated\n");
5958 
5959 	return 0;
5960 }
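
/*
 * Illustrative sketch of the partial receive handling above: short
 * reads accumulate until "expect" bytes have arrived.  demo_recv() is
 * a hypothetical stand-in for drbd_recv_short(); the real loop also
 * handles -EINTR/-EAGAIN specially and decodes the header once it is
 * complete.
 */
static inline int recv_exactly(int (*demo_recv)(void *buf, size_t len),
			       void *buf, int expect)
{
	int received = 0;

	while (received < expect) {
		int rv = demo_recv((char *)buf + received, expect - received);

		if (rv == 0)
			return -ECONNRESET;	/* peer closed the connection */
		if (rv < 0)
			return rv;		/* signal/timeout: caller decides */
		received += rv;
	}
	return 0;
}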
5961 
5962 void drbd_send_acks_wf(struct work_struct *ws)
5963 {
5964 	struct drbd_peer_device *peer_device =
5965 		container_of(ws, struct drbd_peer_device, send_acks_work);
5966 	struct drbd_connection *connection = peer_device->connection;
5967 	struct drbd_device *device = peer_device->device;
5968 	struct net_conf *nc;
5969 	int tcp_cork, err;
5970 
5971 	rcu_read_lock();
5972 	nc = rcu_dereference(connection->net_conf);
5973 	tcp_cork = nc->tcp_cork;
5974 	rcu_read_unlock();
5975 
5976 	if (tcp_cork)
5977 		drbd_tcp_cork(connection->meta.socket);
5978 
5979 	err = drbd_finish_peer_reqs(device);
5980 	kref_put(&device->kref, drbd_destroy_device);
5981 	/* The kref_get() is in drbd_endio_write_sec_final(). It is necessary to keep
5982 	   the struct work_struct send_acks_work alive, which lives in the peer_device object */
5983 
5984 	if (err) {
5985 		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5986 		return;
5987 	}
5988 
5989 	if (tcp_cork)
5990 		drbd_tcp_uncork(connection->meta.socket);
5991 
5992 	return;
5993 }
5994