Lines matching "+full:mode +full:- +full:recovery" in fs/gfs2/lock_dlm.c (GFS2's DLM lock module). Non-matching lines are elided and marked "...".

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */
...
#include "recovery.h"
/**
 * gfs2_update_stats - Update time based stats
 * ...
 */
gfs2_update_stats():
        s64 delta = sample - s->stats[index];
        s->stats[index] += (delta >> 3);
        ...
        s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
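These two updates are the classic smoothed mean/deviation estimator, with the same 1/8 and 1/4 gains TCP uses for SRTT/RTTVAR: the mean slot moves by delta/8, and the second update feeds the adjacent deviation slot (the elided line between the two presumably advances index). A minimal userspace sketch of the same arithmetic; the names lkstats and update_stats are hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>     /* llabs */

/* Hypothetical stand-in for struct gfs2_lkstats: slot 0 holds the
 * smoothed mean, slot 1 the smoothed mean absolute deviation. */
struct lkstats { long long stats[2]; };

static void update_stats(struct lkstats *s, long long sample)
{
        long long delta = sample - s->stats[0];
        s->stats[0] += delta >> 3;                        /* mean += delta/8 */
        s->stats[1] += (llabs(delta) - s->stats[1]) >> 2; /* dev += (|delta| - dev)/4 */
}

int main(void)
{
        struct lkstats s = { { 0, 0 } };
        long long samples[] = { 800, 1200, 1000, 5000, 900 }; /* e.g. RTTs in ns */

        for (int i = 0; i < 5; i++) {
                update_stats(&s, samples[i]);
                printf("sample=%lld mean=%lld dev=%lld\n",
                       samples[i], s.stats[0], s.stats[1]);
        }
        return 0;
}

An outlier (the 5000 above) bumps the deviation estimate sharply but moves the mean only by an eighth of the difference, which is exactly the behavior wanted for lock round-trip statistics.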
/**
 * gfs2_update_reply_times - Update locking statistics
 * ...
 * This assumes that gl->gl_dstamp has been set earlier.
 * ...
 * ... TRY_1CB flags are set are classified as non-blocking.  All
 * ...
 */
gfs2_update_reply_times():
        const unsigned gltype = gl->gl_name.ln_type;
        unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
                         GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
        ...
        rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
        lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
        gfs2_update_stats(&gl->gl_stats, index, rtt);           /* Local */
        gfs2_update_stats(&lks->lkstats[gltype], index, rtt);   /* Global */
/**
 * gfs2_update_request_times - Update locking statistics
 * ...
 * The irt (lock inter-request times) measures the average time
 * ...
 */
gfs2_update_request_times():
        const unsigned gltype = gl->gl_name.ln_type;
        ...
        dstamp = gl->gl_dstamp;
        gl->gl_dstamp = ktime_get_real();
        irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
        lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
        gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);           /* Local */
        gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);   /* Global */
gdlm_ast():
        unsigned ret = gl->gl_state;
        ...
        BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
        ...
        if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
                memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

        switch (gl->gl_lksb.sb_status) {
        case -DLM_EUNLOCK:      /* Unlocked, so glock can be freed */
                if (gl->gl_ops->go_free)
                        gl->gl_ops->go_free(gl);
                ...
        case -DLM_ECANCEL:      /* Cancel while getting lock */
                ...
        case -EAGAIN:           /* Try lock fails */
        case -EDEADLK:          /* Deadlock detected */
                ...
        case -ETIMEDOUT:        /* Canceled due to timeout */
                ...
        }
        ...
        ret = gl->gl_req;
        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
                if (gl->gl_req == LM_ST_SHARED)
                        ...
                else if (gl->gl_req == LM_ST_DEFERRED)
                        ...
        }
        ...
        set_bit(GLF_INITIAL, &gl->gl_flags);
        ...
        if (!test_bit(GLF_INITIAL, &gl->gl_flags))
                gl->gl_lksb.sb_lkid = 0;
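The two truncated branches above handle DLM_SBF_ALTMODE, where the DLM granted the alternate rather than the requested mode. A sketch of the elided bodies, assuming the conventional shared/deferred (PR/CW) alternate pairing:

        ret = gl->gl_req;
        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
                /* DLM granted the alternate mode: PR and CW stand in for
                 * each other, so the resulting gfs2 state swaps. */
                if (gl->gl_req == LM_ST_SHARED)
                        ret = LM_ST_DEFERRED;
                else if (gl->gl_req == LM_ST_DEFERRED)
                        ret = LM_ST_SHARED;
                else
                        BUG();
        }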
static void gdlm_bast(void *arg, int mode)
{
        ...
        switch (mode) {
        ...
                fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
        ...
}
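Only the signature, the switch, and the error report of gdlm_bast() match the query. A sketch of the full dispatch, assuming the usual DLM-mode-to-gfs2-state correspondence; mode here is the mode the blocked remote request needs, and gfs2_glock_cb() hands the resulting demote request to glock core:

static void gdlm_bast(void *arg, int mode)
{
        struct gfs2_glock *gl = arg;

        switch (mode) {
        case DLM_LOCK_EX:       /* a node wants EX: drop the lock entirely */
                gfs2_glock_cb(gl, LM_ST_UNLOCKED);
                break;
        case DLM_LOCK_CW:       /* concurrent write wanted: demote to deferred */
                gfs2_glock_cb(gl, LM_ST_DEFERRED);
                break;
        case DLM_LOCK_PR:       /* protected read wanted: demote to shared */
                gfs2_glock_cb(gl, LM_ST_SHARED);
                break;
        default:
                fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
                BUG();
        }
}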
/* convert gfs lock-state to dlm lock-mode */

make_mode():
        ...
        return -1;
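Only the failure return of make_mode() matches the query; the body is a switch over the four gfs2 lock states. A sketch of the full mapping, assuming the standard gfs2-to-DLM correspondence (note it is the inverse of the gdlm_bast() dispatch above):

/* Assumed correspondence:
 *   LM_ST_UNLOCKED  -> DLM_LOCK_NL  (null)
 *   LM_ST_EXCLUSIVE -> DLM_LOCK_EX  (exclusive)
 *   LM_ST_DEFERRED  -> DLM_LOCK_CW  (concurrent write)
 *   LM_ST_SHARED    -> DLM_LOCK_PR  (protected read)
 */
static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
        switch (lmstate) {
        case LM_ST_UNLOCKED:
                return DLM_LOCK_NL;
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;
        case LM_ST_SHARED:
                return DLM_LOCK_PR;
        }
        fs_err(sdp, "unknown LM state %d\n", lmstate);
        return -1;
}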
make_flags():
        if (gl->gl_lksb.sb_lvbptr)
                ...
        ...
        if (gl->gl_lksb.sb_lkid != 0) {
                ...
                if (test_bit(GLF_BLOCKING, &gl->gl_flags))
                        ...
        }
gfs2_reverse_hex():
                *c-- = hex_asc[value & 0x0f];
gdlm_lock():
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
        ...
        req = make_mode(gl->gl_name.ln_sbd, req_state);
        ...
        if (gl->gl_lksb.sb_lkid) {
                gfs2_update_request_times(gl);
        } else {
                memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
                strname[GDLM_STRNAME_BYTES - 1] = '\0';
                gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
                gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
                gl->gl_dstamp = ktime_get_real();
        }
        ...
        return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
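The resource name handed to dlm_lock() is fixed-width text: the glock type right-aligned in an 8-character field and the lock number right-aligned in a 16-character field, written by gfs2_reverse_hex() from the last column of each field backwards. A standalone sketch; GDLM_STRNAME_BYTES = 25 and the '0'-seeded body of gfs2_reverse_hex() are assumptions consistent with the offsets and the single line shown above:

#include <stdio.h>
#include <string.h>

#define GDLM_STRNAME_BYTES 25   /* assumed: 8-char type + 16-char number + NUL */

static const char hex_asc[] = "0123456789abcdef";

/* Write value in hex, least-significant digit first, walking left from c.
 * The initial '0' covers the value == 0 case. */
static void gfs2_reverse_hex(char *c, unsigned long long value)
{
        *c = '0';
        while (value) {
                *c-- = hex_asc[value & 0x0f];
                value >>= 4;
        }
}

int main(void)
{
        char strname[GDLM_STRNAME_BYTES];

        memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
        strname[GDLM_STRNAME_BYTES - 1] = '\0';
        gfs2_reverse_hex(strname + 7, 2);       /* ln_type, e.g. an inode glock */
        gfs2_reverse_hex(strname + 23, 0x1234); /* ln_number */
        printf("\"%s\"\n", strname);            /* -> "       2            1234" */
        return 0;
}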
gdlm_put_lock():
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        if (gl->gl_lksb.sb_lkid == 0) {
                ...
        }
        ...
        clear_bit(GLF_BLOCKING, &gl->gl_flags);
        ...
        if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
                ...
        }
        ...
        if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
            !gl->gl_lksb.sb_lvbptr) {
                ...
        }
        ...
        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                           ...
        ...
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number, error);
gdlm_cancel():
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
        dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 * ...
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 * ...
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 * ...
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * ...
 * - failures during recovery
 * ...
 * - more specific gfs2 steps in sequence above
 * ...
 * - parallel recovery steps across all nodes
 * ...
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 * ...
 *   ... require recovery, because the mount in step 4 could not have
 *   ... and returning.  The mount in step 4 waits until the recovery in
 *   ...
 * - special case of first mounter: first node to mount the fs
 *   ... and recover any that need recovery before other nodes are allowed
 *   ...
 *   The mounted_lock is demoted to PR when first recovery is done, so
 *   ... mounter is doing first mount recovery of all journals.
 *   A mounting node needs to acquire control_lock in EX mode before
 *   ... the first mount recovery, blocking mounts from other nodes, then demotes
 *   ...
 *   do first mounter recovery
 *   mounted_lock EX->PR
 *   control_lock EX->NL, write lvb generation
 *   ...
 *   control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 *   mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 *   ...
 *   control_lock EX->NL
 * ...
 * - mount during recovery
 *   ...
 *   If a node mounts while others are doing recovery (not first mounter),
 *   ...
 * - control_lock lvb format
 *   ...
 *   GDLM_LVB_SIZE-8 bytes of jid bit map.  If bit N is set, it indicates
 *   that jid N needs recovery.  (A layout sketch follows the lvb helpers below.)
 */
control_lvb_read():
        memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
        ...

control_lvb_write():
        memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
        ...
        memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));

all_jid_bits_clear():
        ...
                          GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
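Together with the lvb format comment above, these helpers pin down the control_lock LVB layout: a little-endian 32-bit recovery generation at offset 0 (written last, over the copied-in bitmap buffer) and the jid bitmap starting at JID_BITMAP_OFFSET. A userspace sketch of the round trip; GDLM_LVB_SIZE = 32 and JID_BITMAP_OFFSET = 8 are assumed values consistent with "GDLM_LVB_SIZE-8 bytes of jid bit map":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GDLM_LVB_SIZE      32   /* assumed lock value block size */
#define JID_BITMAP_OFFSET   8   /* bitmap follows an 8-byte header */

/* Copy the bitmap buffer in, then stamp the generation little-endian
 * over the first 4 bytes, as control_lvb_write() does. */
static void lvb_write(char *lvb, uint32_t gen, const char *bits)
{
        memcpy(lvb, bits, GDLM_LVB_SIZE);
        lvb[0] = gen & 0xff;            /* open-coded cpu_to_le32() */
        lvb[1] = (gen >> 8) & 0xff;
        lvb[2] = (gen >> 16) & 0xff;
        lvb[3] = (gen >> 24) & 0xff;
}

static void lvb_read(const char *lvb, uint32_t *gen, char *bits)
{
        memcpy(bits, lvb, GDLM_LVB_SIZE);
        *gen = (uint32_t)(uint8_t)lvb[0] |
               (uint32_t)(uint8_t)lvb[1] << 8 |
               (uint32_t)(uint8_t)lvb[2] << 16 |
               (uint32_t)(uint8_t)lvb[3] << 24;
}

int main(void)
{
        char lvb[GDLM_LVB_SIZE] = { 0 }, bits[GDLM_LVB_SIZE] = { 0 };
        uint32_t gen;

        bits[JID_BITMAP_OFFSET] |= 1 << 3;      /* mark jid 3 as needing recovery */
        lvb_write(lvb, 42, bits);
        lvb_read(lvb, &gen, bits);
        printf("gen=%u jid3=%d\n", gen,
               !!(bits[JID_BITMAP_OFFSET] & (1 << 3)));   /* -> gen=42 jid3=1 */
        return 0;
}

The kernel manipulates the bitmap with test_bit_le()/__set_bit_le(); the direct byte arithmetic above matches that bit numbering only for the little-endian-within-byte case shown.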
sync_wait_cb():
        complete(&ls->ls_sync_wait);

sync_unlock():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
        ...
                       name, lksb->sb_lkid, error);
        ...
        wait_for_completion(&ls->ls_sync_wait);
        ...
        if (lksb->sb_status != -DLM_EUNLOCK) {
                ...
                       name, lksb->sb_lkid, lksb->sb_status);
                return -1;
        }

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
                     ...)
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
                         strname, GDLM_STRNAME_BYTES - 1,
                         ...
        ...
                fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
                       name, lksb->sb_lkid, flags, mode, error);
        ...
        wait_for_completion(&ls->ls_sync_wait);
        ...
        status = lksb->sb_status;
        ...
        if (status && status != -EAGAIN) {
                fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
                       name, lksb->sb_lkid, flags, mode, status);
        ...
mounted_unlock():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
                         &ls->ls_mounted_lksb, "mounted_lock");
}

control_unlock():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
                         &ls->ls_control_lksb, "control_lock");
}
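sync_lock() turns the asynchronous dlm_lock() into a blocking call with the standard completion idiom (sync_wait_cb() as the AST, then wait_for_completion()), and each wrapper pairs a well-known resource (GFS2_MOUNTED_LOCK, GFS2_CONTROL_LOCK) with its cached lksb, so callers deal only in modes and flags. A hedged sketch of typical caller usage, loosely modeled on the mount sequences in the big comment above; demo_take_control_lock() is a hypothetical name, not part of the file:

/* Sketch only: assumed flow, not the file's control_mount() logic. */
static int demo_take_control_lock(struct gfs2_sbd *sdp)
{
        int error;

        /* Try for control_lock in EX without waiting in the grant queue;
         * -EAGAIN means another mounter holds it and we should retry. */
        error = control_lock(sdp, DLM_LOCK_EX,
                             DLM_LKF_NOQUEUE | DLM_LKF_VALBLK);
        if (error)
                return error;

        /* ... inspect or update the lvb while holding EX ... */

        /* Drop back to NL, letting the conversion write the lvb out. */
        return control_lock(sdp, DLM_LOCK_NL,
                            DLM_LKF_CONVERT | DLM_LKF_VALBLK);
}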
/**
 * remote_withdraw - react to a node withdrawing from the file system
 * ...
 */
remote_withdraw():
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
                        ...
        }
gfs2_control_func():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
                ...
                clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
                ...
        }

        spin_lock(&ls->ls_recover_spin);
        /*
         * ...
         * FIRST_MOUNT means this node is doing first mounter recovery,
         * for which recovery control is handled by
         * ...
         */
        if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);
                ...
        }
        block_gen = ls->ls_recover_block;
        start_gen = ls->ls_recover_start;
        spin_unlock(&ls->ls_recover_spin);

        /*
         * ... dlm recovery is in progress and dlm locking is blocked.
         * ...
         * dlm_recoverd adds to recover_submit[] jids needing recovery
         * gfs2_recover adds to recover_result[] journal recovery results
         * ...
         * ... the journal recovery is SUCCESS
         * ...
         */

        control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

        spin_lock(&ls->ls_recover_spin);
        if (block_gen != ls->ls_recover_block ||
            start_gen != ls->ls_recover_start) {
                ...
                        start_gen, block_gen, ls->ls_recover_block);
                spin_unlock(&ls->ls_recover_spin);
                ...
        }
        ...
        recover_size = ls->ls_recover_size;
        ...
        /*
         * ... in succession.  Only the first will really do recovery,
         * ... recovery.  So, another node may have already recovered
         * ...
         */
        ...
                if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
                        ...
                ls->ls_recover_result[i] = 0;
                ...
                if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
                        ...
                __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
        ...
                if (!ls->ls_recover_submit[i])
                        ...
                if (ls->ls_recover_submit[i] < lvb_gen)
                        ls->ls_recover_submit[i] = 0;
        ...
                if (!ls->ls_recover_submit[i])
                        ...
                if (ls->ls_recover_submit[i] < start_gen) {
                        ls->ls_recover_submit[i] = 0;
                        __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
                }
        ...
        spin_unlock(&ls->ls_recover_spin);
        ...
        control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
        ...
        /*
         * ... and clear a jid bit in the lvb if the recovery is a success.
         * ...
         */
        ...
                if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
                        ...
                }
        ...
        /*
         * No more jid bits set in lvb, all recovery is done, unblock locks
         * ...
         */
        spin_lock(&ls->ls_recover_spin);
        if (ls->ls_recover_block == block_gen &&
            ls->ls_recover_start == start_gen) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);
                ...
        } else {
                ...
                        start_gen, block_gen, ls->ls_recover_block);
                spin_unlock(&ls->ls_recover_spin);
        }
control_mount():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
        memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
        memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
        ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
        init_completion(&ls->ls_sync_wait);
        ...
        set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        ...
                error = -EINTR;
        ...
        /*
         * Other nodes need to do some work in dlm recovery and gfs2_control
         * ...
         */
        if (error == -EAGAIN) {
                ...
        }
        ...
        /*
         * ... we cannot do the first-mount responsibility it implies: recovery.
         */
        if (sdp->sd_args.ar_spectator)
                ...
        ...
        } else if (error != -EAGAIN) {
                ...
        }
        ...
        /* not even -EAGAIN should happen here */
        ...
        /*
         * ... lvb_gen will be non-zero.
         */
        control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
        ...
                error = -EINVAL;
        ...
        /* first mounter, keep both EX while doing first recovery */
        spin_lock(&ls->ls_recover_spin);
        clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
        set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
        ...
        if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
                /* journals need recovery, wait until all are clear */
                fs_info(sdp, "control_mount wait for journal recovery\n");
                ...
        }

        spin_lock(&ls->ls_recover_spin);
        block_gen = ls->ls_recover_block;
        start_gen = ls->ls_recover_start;
        mount_gen = ls->ls_recover_mount;
        ...
        /* ... generation, which might include new recovery bits set */
        if (sdp->sd_args.ar_spectator) {
                fs_info(sdp, "Recovery is required. Waiting for a "
                        "non-spectator to mount.\n");
                ...
        } else {
                ...
                        ls->ls_recover_flags);
        }
        spin_unlock(&ls->ls_recover_spin);
        ...
        /* ... latest recovery generation */
        ...
                lvb_gen, ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
        ...
        /* dlm recovery in progress, wait for it to finish */
        ...
                lvb_gen, ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
        ...
        clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
        memset(ls->ls_recover_submit, 0, ls->ls_recover_size * sizeof(uint32_t));
        memset(ls->ls_recover_result, 0, ls->ls_recover_size * sizeof(uint32_t));
        spin_unlock(&ls->ls_recover_spin);
control_first_done():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        spin_lock(&ls->ls_recover_spin);
        start_gen = ls->ls_recover_start;
        block_gen = ls->ls_recover_block;

        if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
            !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                ...
                        start_gen, block_gen, ls->ls_recover_flags);
                spin_unlock(&ls->ls_recover_spin);
                ...
                return -1;
        }
        ...
        /*
         * Wait for the end of a dlm recovery cycle to switch from
         * first mounter recovery.  We can ignore any recover_slot
         * ...
         * have not fully mounted, so they don't need recovery.
         */
        spin_unlock(&ls->ls_recover_spin);
        ...
        wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
                    ...);
        ...
        clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
        memset(ls->ls_recover_submit, 0, ls->ls_recover_size * sizeof(uint32_t));
        memset(ls->ls_recover_result, 0, ls->ls_recover_size * sizeof(uint32_t));
        spin_unlock(&ls->ls_recover_spin);

        memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
        control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
/* ... gfs2 jids start at 0, so jid = slot - 1 (dlm slots start at 1) */
set_recover_size():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        if (!ls->ls_lvb_bits) {
                ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
                if (!ls->ls_lvb_bits)
                        return -ENOMEM;
        }
        ...
                if (max_jid < slots[i].slot - 1)
                        max_jid = slots[i].slot - 1;
        ...
        old_size = ls->ls_recover_size;
        ...
                return -ENOMEM;
        ...
        spin_lock(&ls->ls_recover_spin);
        memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
        memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
        kfree(ls->ls_recover_submit);
        kfree(ls->ls_recover_result);
        ls->ls_recover_submit = submit;
        ls->ls_recover_result = result;
        ls->ls_recover_size = new_size;
        spin_unlock(&ls->ls_recover_spin);
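The elided middle of set_recover_size() sizes and allocates the replacement arrays outside the spinlock; only the copy-and-swap under ls_recover_spin is shown above. A sketch of the elided part, with RECOVER_SIZE_INC as an assumed growth increment:

        /* Sketch of the elided sizing/allocation (names assumed). */
        if (old_size >= max_jid + 1)
                return 0;                       /* already large enough */

        new_size = old_size + RECOVER_SIZE_INC; /* grow in fixed steps */

        /* Allocate before taking ls_recover_spin; GFP_NOFS avoids
         * recursing into the filesystem under memory pressure. */
        submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
        result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
        if (!submit || !result) {
                kfree(submit);
                kfree(result);
                return -ENOMEM;
        }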
free_recover_size():
        kfree(ls->ls_lvb_bits);
        kfree(ls->ls_recover_submit);
        kfree(ls->ls_recover_result);
        ls->ls_recover_submit = NULL;
        ls->ls_recover_result = NULL;
        ls->ls_recover_size = 0;
        ls->ls_lvb_bits = NULL;
/* dlm calls before it does lock recovery */

gdlm_recover_prep():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        spin_lock(&ls->ls_recover_spin);
        ls->ls_recover_block = ls->ls_recover_start;
        set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
        ...
        if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
            test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);
                ...
        }
        set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
gdlm_recover_slot():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int jid = slot->slot - 1;
        ...
        spin_lock(&ls->ls_recover_spin);
        if (ls->ls_recover_size < jid + 1) {
                ...
                        jid, ls->ls_recover_block, ls->ls_recover_size);
                spin_unlock(&ls->ls_recover_spin);
                ...
        }

        if (ls->ls_recover_submit[jid]) {
                ...
                        jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
        }
        ls->ls_recover_submit[jid] = ls->ls_recover_block;
        spin_unlock(&ls->ls_recover_spin);
/* dlm calls after recover_slot and after it completes lock recovery */

gdlm_recover_done():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        spin_lock(&ls->ls_recover_spin);
        ls->ls_recover_start = generation;
        ...
        if (!ls->ls_recover_mount) {
                ls->ls_recover_mount = generation;
                ls->ls_jid = our_slot - 1;
        }

        if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
                queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

        clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
        ...
        wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
        spin_unlock(&ls->ls_recover_spin);
/* gfs2_recover thread has a journal recovery result */

gdlm_recovery_result():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
                ...

        /* don't care about the recovery of own journal during mount */
        if (jid == ls->ls_jid)
                ...

        spin_lock(&ls->ls_recover_spin);
        if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
                spin_unlock(&ls->ls_recover_spin);
                ...
        }
        if (ls->ls_recover_size < jid + 1) {
                ...
                        jid, ls->ls_recover_size);
                spin_unlock(&ls->ls_recover_spin);
                ...
        }
        ...
        ls->ls_recover_result[jid] = result;
        ...
        if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
                queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
                                   ...);
        spin_unlock(&ls->ls_recover_spin);
gdlm_mount():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
        spin_lock_init(&ls->ls_recover_spin);
        ls->ls_recover_flags = 0;
        ls->ls_recover_mount = 0;
        ls->ls_recover_start = 0;
        ls->ls_recover_block = 0;
        ls->ls_recover_size = 0;
        ls->ls_recover_submit = NULL;
        ls->ls_recover_result = NULL;
        ls->ls_lvb_bits = NULL;
        ...
                error = -EINVAL;
        ...
        memcpy(cluster, table, strlen(table) - strlen(fsname));
        ...
                                  &ls->ls_dlm);
        ...
        set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
        ...
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
                ...
                error = -EINVAL;
                ...
        }
        ...
        ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        ...
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
        ...
        dlm_release_lockspace(ls->ls_dlm, 2);
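The lock table name passed at mount is "clustername:fsname" (the string given to mkfs.gfs2 -t), and the memcpy above copies everything before the colon into cluster while fsname still points at the ':'. A standalone sketch of that split; the buffer size is an assumption:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *table = "mycluster:myfs";   /* e.g. mkfs.gfs2 -t mycluster:myfs */
        char cluster[64] = { 0 };               /* assumed buffer size */
        const char *fsname;

        fsname = strchr(table, ':');            /* points at the ':' */
        if (!fsname) {
                fprintf(stderr, "no fsname found\n");
                return 1;                       /* the kernel returns -EINVAL here */
        }

        /* strlen(fsname) still counts the ':', so this copies only the
         * cluster name, exactly as in the gdlm_mount() fragment. */
        memcpy(cluster, table, strlen(table) - strlen(fsname));
        fsname++;                               /* filesystem name follows the ':' */

        printf("cluster=\"%s\" fsname=\"%s\"\n", cluster, fsname);
        return 0;
}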
gdlm_first_done():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ...
        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
                ...
gdlm_unmount():
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
                ...
        ...
        spin_lock(&ls->ls_recover_spin);
        set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
        flush_delayed_work(&sdp->sd_control_work);

        /* mounted_lock and control_lock will be purged in dlm recovery */
        ...
        if (ls->ls_dlm) {
                dlm_release_lockspace(ls->ls_dlm, 2);
                ls->ls_dlm = NULL;
        }