/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not as a module),
 * these become boot parameters (e.g., drbd.minor_count) */
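/*
 * Illustrative usage only (standard module-parameter handling, nothing DRBD
 * specific): with DRBD built as a module, such a parameter is set at load
 * time, e.g.
 *
 *	modprobe drbd minor_count=8
 *
 * while for a built-in DRBD the equivalent boot parameter is
 * "drbd.minor_count=8" on the kernel command line.
 */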

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
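
/*
 * Illustrative sketch only (not compiled here): pushing a page onto this
 * pool chains it through the page's private member, roughly
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;
 *
 * and popping reverses it via page_private(page), all under drbd_pp_lock.
 */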

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
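/* i.e. allow at most a burst of 5 rate-limited messages per 5-second interval */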

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!bioset_initialized(&drbd_md_io_bio_set))
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:       The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			 thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			 thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @cpu_mask:	resulting CPU mask
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
				    GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
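
/*
 * Summary of the dispatch above: peers speaking protocol >= 100 always get
 * the p_header100 layout (which also carries a volume number); peers
 * speaking 95..99 get the "big" p_header95 layout, with its 32-bit length
 * field, only when the payload exceeds DRBD_MAX_SIZE_H80_PACKET; everything
 * else uses the classic p_header80 with its 16-bit length field.
 */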

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

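/* The UI_FLAGS slot of the uuid array carries a small bit field, as set up
 * below: bit 0 = this node wants its data discarded on connect
 * (discard_my_data), bit 1 = we were a crashed primary (CRASHED_PRIMARY),
 * bit 2 = the disk is becoming D_INCONSISTENT, and bit 3 (passed in by
 * drbd_send_uuids_skip_initial_sync()) = skip the initial sync. */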
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

/* communicated if (agreed_features & DRBD_FF_WSAME) */
static void
assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
		    struct request_queue *q)
{
	if (q) {
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = blk_queue_discard(q);
		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
	} else {
		q = device->rq_queue;
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;
		p->qlim->write_same_capable = 0;
	}
}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		assign_p_sizes_qlim(device, p, q);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
		assign_p_sizes_qlim(device, p, NULL);
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}

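/* Layout of the p_compressed_bm encoding byte, as manipulated by the three
 * helpers below: bits 0-3 hold the bitmap encoding code, bits 4-6 the number
 * of pad bits at the end of the bit stream, and bit 7 stores whether the
 * first run of the RLE stream is a run of set bits. */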
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}

static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
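	/* Worked example (hypothetical bitmap): for the bit sequence
	 * 0000 1111 10... the loop below emits the run lengths 4, 5, ...
	 * with the start flag cleared (first run is a run of zeros).  Had
	 * the sequence started with a set bit, the first, zero-length run
	 * would be skipped and the start flag set instead. */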
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
				 "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}

/**
 * send_bitmap_rle_or_plain() - send one bitmap packet, compressed if worthwhile
 * @device:	DRBD device.
 * @c:		bitmap transfer context.
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}

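/* Note on the return convention below: this returns 0 on success and nonzero
 * (1, or -1 if there is no socket) on failure, because it negates the bool
 * result of _drbd_send_bitmap(). */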
int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->blksize = cpu_to_be32(peer_req->i.size);
	p->pad = 0;
	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}

int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}

int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think the connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - device->last_received); */

	drop_it =   connection->meta.socket == sock
		|| !connection->ack_receiver.task
		|| get_t_state(&connection->ack_receiver) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

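	/* Otherwise count this timeout against the configured ko-count:
	 * each expired send timeout decrements it, and only when it reaches
	 * zero do we actually drop the connection; until then we ask for a
	 * ping so the peer can prove it is still alive. */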
	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it; /* && (device->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still: it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}
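
/* For illustration: a 10 kB peer request on a 4 kB PAGE_SIZE system is
 * pushed as three sendpage calls of 4 kB, 4 kB and 2 kB; the first two
 * carry MSG_MORE (page_chain_next() != NULL), hinting the stack to
 * coalesce them rather than flush a segment per page. */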

static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{
	if (connection->agreed_pro_version >= 95)
		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
			(bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
	else
		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
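
/* Example mapping (illustrative): on a connection that agreed on protocol
 * version 96, a bio with REQ_SYNC | REQ_FUA becomes DP_RW_SYNC | DP_FUA
 * on the wire; on protocol 94 the same bio is reduced to just DP_RW_SYNC,
 * since older peers do not understand the remaining flags. */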

/* Used to send write or TRIM aka REQ_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	struct p_wsame *wsame = NULL;
	void *digest_out;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		|| (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & DP_DISCARD) {
		struct p_trim *t = (struct p_trim*)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
		goto out;
	}
	if (dp_flags & DP_WSAME) {
		/* this will only work if DRBD_FF_WSAME is set AND the
		 * handshake agreed that all nodes and backend devices are
		 * WRITE_SAME capable and agree on logical_block_size */
		wsame = (struct p_wsame*)p;
		digest_out = wsame + 1;
		wsame->size = cpu_to_be32(req->i.size);
	} else
		digest_out = p + 1;

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
	if (wsame) {
		err =
		    __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
				   sizeof(*wsame) + digest_size, NULL,
				   bio_iovec(req->master_bio).bv_len);
	} else
		err =
		    __send_command(peer_device->connection, device->vnr, sock, P_DATA,
				   sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (digest_size > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
	} while (sent < size);

	if (sock == connection->data.socket)
		clear_bit(NET_CONGESTED, &connection->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			drbd_err(connection, "%s_sendmsg returned %d\n",
				 sock == connection->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}

/**
 * drbd_send_all  -  Send an entire buffer
 * @connection:	DRBD connection the socket belongs to
 * @sock:	socket to send on (data or meta socket)
 * @buffer:	buffer to transmit completely
 * @size:	number of bytes in @buffer
 * @msg_flags:	MSG_* flags passed down to drbd_send()
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(connection, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}
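
/* Usage note (illustrative): drbd_send() returns the number of bytes it
 * actually transmitted, so a short send -- e.g. 4 bytes of an 8 byte
 * header before the connection dropped -- is mapped to -EIO here;
 * callers of drbd_send_all() never see partial progress. */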

static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_device *device = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&device->resource->req_lock, flags);
	/* to have a stable device->state.role
	 * and no race with updating open_cnt */

	if (device->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!drbd_allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		device->open_cnt++;
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static void drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_device *device = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	device->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
}

/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{
	if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
		D_ASSERT(device, device->state.role == R_PRIMARY);
		if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
			drbd_queue_work_if_unqueued(
				&first_peer_device(device)->connection->sender_work,
				&device->unplug_work);
		}
	}
}

static void drbd_set_defaults(struct drbd_device *device)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	device->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}

void drbd_init_set_defaults(struct drbd_device *device)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(device);

	atomic_set(&device->ap_bio_cnt, 0);
	atomic_set(&device->ap_actlog_cnt, 0);
	atomic_set(&device->ap_pending_cnt, 0);
	atomic_set(&device->rs_pending_cnt, 0);
	atomic_set(&device->unacked_cnt, 0);
	atomic_set(&device->local_cnt, 0);
	atomic_set(&device->pp_in_use_by_net, 0);
	atomic_set(&device->rs_sect_in, 0);
	atomic_set(&device->rs_sect_ev, 0);
	atomic_set(&device->ap_in_flight, 0);
	atomic_set(&device->md_io.in_use, 0);

	mutex_init(&device->own_state_mutex);
	device->state_mutex = &device->own_state_mutex;

	spin_lock_init(&device->al_lock);
	spin_lock_init(&device->peer_seq_lock);

	INIT_LIST_HEAD(&device->active_ee);
	INIT_LIST_HEAD(&device->sync_ee);
	INIT_LIST_HEAD(&device->done_ee);
	INIT_LIST_HEAD(&device->read_ee);
	INIT_LIST_HEAD(&device->net_ee);
	INIT_LIST_HEAD(&device->resync_reads);
	INIT_LIST_HEAD(&device->resync_work.list);
	INIT_LIST_HEAD(&device->unplug_work.list);
	INIT_LIST_HEAD(&device->bm_io_work.w.list);
	INIT_LIST_HEAD(&device->pending_master_completion[0]);
	INIT_LIST_HEAD(&device->pending_master_completion[1]);
	INIT_LIST_HEAD(&device->pending_completion[0]);
	INIT_LIST_HEAD(&device->pending_completion[1]);

	device->resync_work.cb  = w_resync_timer;
	device->unplug_work.cb  = w_send_write_hint;
	device->bm_io_work.w.cb = w_bitmap_io;

	timer_setup(&device->resync_timer, resync_timer_fn, 0);
	timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
	timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
	timer_setup(&device->request_timer, request_timer_fn, 0);

	init_waitqueue_head(&device->misc_wait);
	init_waitqueue_head(&device->state_wait);
	init_waitqueue_head(&device->ee_wait);
	init_waitqueue_head(&device->al_wait);
	init_waitqueue_head(&device->seq_wait);

	device->resync_wenr = LC_FREE;
	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}

void drbd_device_cleanup(struct drbd_device *device)
{
	int i;
	if (first_peer_device(device)->connection->receiver.t_state != NONE)
		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
			 first_peer_device(device)->connection->receiver.t_state);

	device->al_writ_cnt  =
	device->bm_writ_cnt  =
	device->read_cnt     =
	device->recv_cnt     =
	device->send_cnt     =
	device->writ_cnt     =
	device->p_size       =
	device->rs_start     =
	device->rs_total     =
	device->rs_failed    = 0;
	device->rs_last_events = 0;
	device->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		device->rs_mark_left[i] = 0;
		device->rs_mark_time[i] = 0;
	}
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	drbd_set_my_capacity(device, 0);
	if (device->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(device, 0, 1);
		drbd_bm_cleanup(device);
	}

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	clear_bit(AL_SUSPENDED, &device->flags);

	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));
	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->net_ee));
	D_ASSERT(device, list_empty(&device->resync_reads));
	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(device, list_empty(&device->resync_work.list));
	D_ASSERT(device, list_empty(&device->unplug_work.list));

	drbd_set_defaults(device);
}


static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	bioset_exit(&drbd_io_bio_set);
	bioset_exit(&drbd_md_io_bio_set);
	mempool_exit(&drbd_md_io_page_pool);
	mempool_exit(&drbd_ee_mempool);
	mempool_exit(&drbd_request_mempool);
	kmem_cache_destroy(drbd_ee_cache);
	kmem_cache_destroy(drbd_request_cache);
	kmem_cache_destroy(drbd_bm_ext_cache);
	kmem_cache_destroy(drbd_al_ext_cache);

	drbd_ee_cache      = NULL;
	drbd_request_cache = NULL;
	drbd_bm_ext_cache  = NULL;
	drbd_al_ext_cache  = NULL;
}

static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
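	/* Rough sizing (assuming DRBD_MAX_BIO_SIZE of 1 MiB, 4 KiB pages and
	 * the default minor_count of 32): 256 pages per minor, i.e. 8192
	 * pages or 32 MiB reserved for drbd's own page pool. */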
	int i, ret;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto Enomem;

	ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
			  BIOSET_NEED_BVECS);
	if (ret)
		goto Enomem;

	ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
	if (ret)
		goto Enomem;

	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
				     drbd_request_cache);
	if (ret)
		goto Enomem;

	ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
	if (ret)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}

static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device, *tmp_peer_device;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (device->this_bdev)
		bdput(device->this_bdev);

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io.page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);

	/* not for_each_connection(connection, resource):
	 * those may have been cleaned up and disassociated already.
	 */
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
		kfree(peer_device);
	}
	memset(device, 0xfd, sizeof(*device));
	kfree(device);
	kref_put(&resource->kref, drbd_destroy_resource);
}

/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->device;
		struct bio *bio = req->master_bio;
		unsigned long start_jif = req->start_jif;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
			       (req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				 req, atomic_read(&req->completion_ref),
				 req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio, start_jif);
	}
}

/* called via drbd_req_put_completion_ref(),
 * holds resource->req_lock */
void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->device);

	queue_work(retry.wq, &retry.worker);
}

void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	memset(resource, 0xf2, sizeof(*resource));
	kfree(resource);
}

void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		drbd_debugfs_connection_cleanup(connection);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	drbd_debugfs_resource_cleanup(resource);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu, since there is no other updater anymore; genl is already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_debugfs_cleanup();

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	pr_info("module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data:	User data
 * @bdi_bits:		Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_device *device = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;
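
	/* The single-character reason recorded below decodes as: 'd' IO is
	 * frozen by DRBD itself, 'c' a usermode helper callback is pending,
	 * 'b' the backing device is congested, 'n' the replication network
	 * is congested, 'a' backing device and network both, '-' idle. */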

	if (!may_inc_ap_bio(device)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
		r |= (1 << WB_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(device, D_UP_TO_DATE))
			r |= (1 << WB_sync_congested);
		else
			put_ldev(device);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(device)) {
		q = bdev_get_queue(device->ldev->backing_bdev);
		r = bdi_congested(q->backing_dev_info, bdi_bits);
		put_ldev(device);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << WB_async_congested) &&
	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
		r |= (1 << WB_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	device->congestion_reason = reason;
	return r;
}

static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct completion_work {
	struct drbd_work w;
	struct completion done;
};

static int w_complete(struct drbd_work *w, int cancel)
{
	struct completion_work *completion_work =
		container_of(w, struct completion_work, w);

	complete(&completion_work->done);
	return 0;
}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
	struct completion_work completion_work;

	completion_work.w.cb = w_complete;
	init_completion(&completion_work.done);
	drbd_queue_work(work_queue, &completion_work.w);
	wait_for_completion(&completion_work.done);
}

struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}

struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_ahash(connection->csums_tfm);
	crypto_free_ahash(connection->verify_tfm);
	crypto_free_shash(connection->cram_hmac_tfm);
	crypto_free_ahash(connection->integrity_tfm);
	crypto_free_ahash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err == -EOVERFLOW) {
			/* So what. mask it out. */
			cpumask_var_t tmp_cpu_mask;
			if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
				cpumask_setall(tmp_cpu_mask);
				cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
				drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
					  res_opts->cpu_mask,
					  strlen(res_opts->cpu_mask) > 12 ? "..." : "",
					  nr_cpu_ids);
				free_cpumask_var(tmp_cpu_mask);
				err = 0;
			}
		}
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->ack_receiver.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}

struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	resource->write_ordering = WO_BDEV_FLUSH;
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	mutex_init(&resource->adm_mutex);
	spin_lock_init(&resource->req_lock);
	drbd_debugfs_resource_add(resource);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}

/* caller must be under adm_mutex */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
	connection->receiver.connection = connection;
	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
	connection->worker.connection = connection;
	drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
	connection->ack_receiver.connection = connection;

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	drbd_debugfs_connection_add(connection);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}

void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	memset(connection, 0xfc, sizeof(*connection));
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to say "drbd%d", ..., minor */
	device->submit.wq =
		alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}

enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
	struct drbd_resource *resource = adm_ctx->resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	int vnr = adm_ctx->volume;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_OR_VOLUME_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;
	q->queuedata = device;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	device->this_bdev->bd_contains = device->this_bdev;

	q->backing_dev_info->congested_fn = drbd_congested;
	q->backing_dev_info->congested_data = device;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_write_cache(q, true, true);
	/* Setting the max_hw_sectors to an odd value of 8 KiB here;
	 * this triggers a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);

	device->md_io.page = alloc_page(GFP_KERNEL);
	if (!device->md_io.page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_no_minor_idr;
	}
	kref_get(&device->kref);

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	INIT_LIST_HEAD(&device->pending_bitmap_io);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC)
				err = ERR_INVALID_REQUEST;
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
		INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS) {
		for_each_peer_device(peer_device, device)
			drbd_connected(peer_device);
	}
	/* move to create_peer_device() */
	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_add(peer_device);
	drbd_debugfs_device_add(device);
	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_remove(&connection->peer_devices, vnr);
		if (peer_device)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io.page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}

void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	struct drbd_peer_device *peer_device;

	/* move to free_peer_device() */
	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_debugfs_device_cleanup(device);
	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		kref_put(&device->kref, drbd_destroy_device);
	}
	idr_remove(&resource->devices, device->vnr);
	kref_put(&device->kref, drbd_destroy_device);
	idr_remove(&drbd_devices, device_to_minor(device));
	kref_put(&device->kref, drbd_destroy_device);
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_put(&device->kref, drbd_destroy_device);
}

static int __init drbd_init(void)
{
	int err;

	if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
		pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		pr_err("unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	/*
	 * allocate all necessary structs
	 */
	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	mutex_init(&resources_mutex);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		pr_err("unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_single("drbd", S_IFREG | 0444, NULL, drbd_seq_show);
	if (!drbd_proc) {
		pr_err("unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		pr_err("unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	if (drbd_debugfs_init())
		pr_notice("failed to initialize debugfs -- will not be available\n");

	pr_info("initialized. "
		"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
		API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	pr_info("%s\n", drbd_buildtag());
	pr_info("registered as block device major %d\n", DRBD_MAJOR);
	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		pr_err("ran out of memory\n");
	else
		pr_err("initialization failure\n");
	return err;
}

static void drbd_free_one_sock(struct drbd_socket *ds)
{
	struct socket *s;
	mutex_lock(&ds->mutex);
	s = ds->socket;
	ds->socket = NULL;
	mutex_unlock(&ds->mutex);
	if (s) {
		/* so debugfs does not need to mutex_lock() */
		synchronize_rcu();
		kernel_sock_shutdown(s, SHUT_RDWR);
		sock_release(s);
	}
}

void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket)
		drbd_free_one_sock(&connection->data);
	if (connection->meta.socket)
		drbd_free_one_sock(&connection->meta);
}

/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
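
/* Size check, spelled out: the seven u64 members (la_size_sect, uuid[4],
 * device_uuid, reserved_u64_1) are 7 * 8 = 56 bytes, the ten u32 members
 * are 10 * 4 = 40 bytes, so reserved_u8[] pads the remaining 4000 bytes
 * and the struct is exactly 4096 bytes -- which drbd_md_sync() below
 * asserts with BUILD_BUG_ON(). */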

void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
		/* this was a try anyways ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}

static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}
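
	/* With 512 byte sectors, MD_32kB_SECT is 64, so this legacy default
	 * works out to one stripe of 64/8 = 8 4k-blocks: the traditional
	 * 32 kB activity log. */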

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). Right now, anything
	 * more than 72 * 4k blocks total only increases the amount of history;
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;
3197
3198 in_core->al_stripe_size_4k = al_stripe_size_4k;
3199 in_core->al_stripes = al_stripes;
3200 in_core->al_size_4k = al_size_4k;
3201
3202 return 0;
3203 err:
3204 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3205 al_stripes, al_stripe_size_4k);
3206 return -EINVAL;
3207 }
3208
check_offsets_and_sizes(struct drbd_device * device,struct drbd_backing_dev * bdev)3209 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3210 {
3211 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3212 struct drbd_md *in_core = &bdev->md;
3213 s32 on_disk_al_sect;
3214 s32 on_disk_bm_sect;
3215
3216 /* The on-disk size of the activity log, calculated from offsets, and
3217 * the size of the activity log calculated from the stripe settings,
3218 * should match.
3219 * Though we could relax this a bit: it is ok, if the striped activity log
3220 * fits in the available on-disk activity log size.
3221 * Right now, that would break how resize is implemented.
3222 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3223 * of possible unused padding space in the on disk layout. */
3224 if (in_core->al_offset < 0) {
3225 if (in_core->bm_offset > in_core->al_offset)
3226 goto err;
3227 on_disk_al_sect = -in_core->al_offset;
3228 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3229 } else {
3230 if (in_core->al_offset != MD_4kB_SECT)
3231 goto err;
3232 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3233 goto err;
3234
3235 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3236 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3237 }
3238
3239 /* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
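	/* One bitmap bit covers BM_BLOCK_SIZE (4 kB) = MD_4kB_SECT sectors,
	 * so we need la_size_sect/MD_4kB_SECT bits; /8 turns bits into bytes,
	 * /512 turns bytes into sectors; the +7 rounds up to a full bit. */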
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);

	return -EINVAL;
}


/**
 * drbd_md_read() - Reads in the meta data super block
 * @device: DRBD device.
 * @bdev: Device from which the meta data should be read in.
 *
 * Returns NO_ERROR on success, or an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);
	/* Even for (flexible or indexed) external meta data,
	 * initially restrict us to the 4k superblock for now.
	 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
	bdev->md.md_size_sect = 8;

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
				 REQ_OP_READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

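	/* bm_bytes_per_bit is the coverage of a single bitmap bit: each bit
	 * tracks BM_BLOCK_SIZE (4 kB) of backing device data. */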
	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device: DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() is triggered within five seconds.
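 *
 * The typical pattern (see e.g. __drbd_uuid_set() or drbd_md_set_flag()
 * below) is: modify a member of device->ldev->md, then mark it dirty.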
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
		mod_timer(&device->md_sync_timer, jiffies + HZ);
		device->last_md_mark_dirty.line = line;
		device->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif

void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}

void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}

void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device: DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
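 *
 * For example, with uuid[UI_CURRENT] == C and uuid[UI_BITMAP] == 0, after
 * this call uuid[UI_BITMAP] == C and uuid[UI_CURRENT] is a fresh random
 * value (its lowest bit reflecting the local role, see __drbd_uuid_set()).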
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}

void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device: DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
{
	int rv = -EIO;

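	/* Set and sync MDF_FULL_SYNC first: should we crash while writing the
	 * bitmap below, the flag is already on stable storage and still
	 * mandates a full sync; it is only cleared once the bitmap write
	 * has succeeded. */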
	drbd_md_set_flag(device, MDF_FULL_SYNC);
	drbd_md_sync(device);
	drbd_bm_set_all(device);

	rv = drbd_bm_write(device);

	if (!rv) {
		drbd_md_clear_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device: DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
{
	drbd_resume_al(device);
	drbd_bm_clear_all(device);
	return drbd_bm_write(device);
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, bm_io_work.w);
	struct bm_io_work *work = &device->bm_io_work;
	int rv = -EIO;

	if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
		int cnt = atomic_read(&device->ap_bio_cnt);
		if (cnt)
			drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
					cnt, work->why);
	}

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @done: callback to be called after the bitmap IO was performed
 * @why: Descriptive text of the reason for doing the IO
 * @flags: how to lock the bitmap (enum bm_flag)
 *
 * While IO on the bitmap is in flight, application IO is frozen; this
 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
 * ONLY be called from worker context. It MUST NOT be used while a previous
 * such work is still pending!
 *
 * Its worker function encloses the call of io_fn() by get_ldev() and
 * put_ldev().
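 *
 * A typical call (a sketch; real call sites pass their own reason text and
 * flags) looks like
 *   drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, NULL,
 *                        "set_n_write", BM_LOCKED_MASK);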
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	/* don't wait for pending application IO if the caller indicates that
	 * application IO does not conflict anyways. */
	if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @why: Descriptive text of the reason for doing the IO
 * @flags: how to lock the bitmap (enum bm_flag)
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
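 *
 * A typical call (a sketch) from non-worker context looks like
 *   drbd_bitmap_io(device, &drbd_bmio_clear_n_write, "clear_n_write",
 *                  BM_LOCKED_MASK);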
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	/* Only suspend io if some operation is supposed to be locked out */
	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if (do_suspend_io)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if (do_suspend_io)
		drbd_resume_io(device);

	return rv;
}

void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, md_sync_timer);
	drbd_device_post_work(device, MD_SYNC);
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA] = "Data",
		[P_WSAME] = "WriteSame",
		[P_TRIM] = "Trim",
		[P_DATA_REPLY] = "DataReply",
		[P_RS_DATA_REPLY] = "RSDataReply",
		[P_BARRIER] = "Barrier",
		[P_BITMAP] = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE] = "UnplugRemote",
		[P_DATA_REQUEST] = "DataRequest",
		[P_RS_DATA_REQUEST] = "RSDataRequest",
		[P_SYNC_PARAM] = "SyncParam",
		[P_SYNC_PARAM89] = "SyncParam89",
		[P_PROTOCOL] = "ReportProtocol",
		[P_UUIDS] = "ReportUUIDs",
		[P_SIZES] = "ReportSizes",
		[P_STATE] = "ReportState",
		[P_SYNC_UUID] = "ReportSyncUUID",
		[P_AUTH_CHALLENGE] = "AuthChallenge",
		[P_AUTH_RESPONSE] = "AuthResponse",
		[P_PING] = "Ping",
		[P_PING_ACK] = "PingAck",
		[P_RECV_ACK] = "RecvAck",
		[P_WRITE_ACK] = "WriteAck",
		[P_RS_WRITE_ACK] = "RSWriteAck",
		[P_SUPERSEDED] = "Superseded",
		[P_NEG_ACK] = "NegAck",
		[P_NEG_DREPLY] = "NegDReply",
		[P_NEG_RS_DREPLY] = "NegRSDReply",
		[P_BARRIER_ACK] = "BarrierAck",
		[P_STATE_CHG_REQ] = "StateChgRequest",
		[P_STATE_CHG_REPLY] = "StateChgReply",
		[P_OV_REQUEST] = "OVRequest",
		[P_OV_REPLY] = "OVReply",
		[P_OV_RESULT] = "OVResult",
		[P_CSUM_RS_REQUEST] = "CsumRSRequest",
		[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP] = "CBitmap",
		[P_DELAY_PROBE] = "DelayProbe",
		[P_OUT_OF_SYNC] = "OutOfSync",
		[P_RS_CANCEL] = "RSCancel",
		[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
		[P_RETRY_WRITE] = "retry_write",
		[P_PROTOCOL_UPDATE] = "protocol_update",
		[P_RS_THIN_REQ] = "rs_thin_req",
		[P_RS_DEALLOCATED] = "rs_deallocated",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc - wait for a request to make progress
 * @device: device associated with the request
 * @i: the struct drbd_interval embedded in struct drbd_request or
 *     struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
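	/* nc->timeout is configured in units of 0.1 seconds, so
	 * nc->timeout * HZ / 10 is that value in jiffies; ko_count scales it,
	 * and a ko_count of 0 means "wait forever". */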
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

void lock_all_resources(void)
{
	struct drbd_resource *resource;
	int __maybe_unused i = 0;

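	/* resources_mutex keeps the resource list stable while we walk it;
	 * all req_locks share one lock class, so hand each acquisition its
	 * own lockdep subclass via spin_lock_nested() to avoid false
	 * recursive-locking reports. */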
	mutex_lock(&resources_mutex);
	local_irq_disable();
	for_each_resource(resource, &drbd_resources)
		spin_lock_nested(&resource->req_lock, i++);
}

void unlock_all_resources(void)
{
	struct drbd_resource *resource;

	for_each_resource(resource, &drbd_resources)
		spin_unlock(&resource->req_lock);
	local_irq_enable();
	mutex_unlock(&resources_mutex);
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
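 * The multiplier and addend are primes; swahw32() swaps the two 16-bit
 * halves of the state, so the better-mixed high-order bits of the LCG
 * end up in the low-order bits of the returned value.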
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(drbd_fault_devs == 0 ||
			((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
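
	/* E.g. drbd_fault_rate=10 gives each eligible request a ~10% chance
	 * of an injected fault; drbd_fault_devs == 0 targets all minors,
	 * otherwise it is a bitmask of minor numbers. */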

	if (ret) {
		drbd_fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif

const char *drbd_buildtag(void)
{
	/* When built from external sources, DRBD carries a reference to the
	 * git hash of the source code here. */

	static char buildtag[38] = "\0uilt-in";
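	/* The leading NUL marks the buffer as "not yet initialized": for
	 * modular builds it is overwritten by the srcversion string below;
	 * when built into the kernel, patching the first byte to 'b'
	 * uncovers the remaining "uilt-in". */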

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);