Lines Matching full:ls

260 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_lock() local
282 return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, in gdlm_lock()
289 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_put_lock() local
303 if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { in gdlm_put_lock()
315 error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, in gdlm_put_lock()
327 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_cancel() local
328 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); in gdlm_cancel()
474 static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, in control_lvb_read() argument
478 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); in control_lvb_read()
483 static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, in control_lvb_write() argument
487 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); in control_lvb_write()
489 memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); in control_lvb_write()
500 struct lm_lockstruct *ls = arg; in sync_wait_cb() local
501 complete(&ls->ls_sync_wait); in sync_wait_cb()
506 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_unlock() local
509 error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); in sync_unlock()
516 wait_for_completion(&ls->ls_sync_wait); in sync_unlock()
529 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_lock() local
536 error = dlm_lock(ls->ls_dlm, mode, lksb, flags, in sync_lock()
538 0, sync_wait_cb, ls, NULL); in sync_lock()
545 wait_for_completion(&ls->ls_sync_wait); in sync_lock()
559 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_unlock() local
560 return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); in mounted_unlock()
565 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_lock() local
567 &ls->ls_mounted_lksb, "mounted_lock"); in mounted_lock()
572 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_unlock() local
573 return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); in control_unlock()
578 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_lock() local
580 &ls->ls_control_lksb, "control_lock"); in control_lock()
608 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gfs2_control_func() local
622 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
632 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gfs2_control_func()
633 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gfs2_control_func()
634 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
637 block_gen = ls->ls_recover_block; in gfs2_control_func()
638 start_gen = ls->ls_recover_start; in gfs2_control_func()
639 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
669 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in gfs2_control_func()
671 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
672 if (block_gen != ls->ls_recover_block || in gfs2_control_func()
673 start_gen != ls->ls_recover_start) { in gfs2_control_func()
675 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
676 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
681 recover_size = ls->ls_recover_size; in gfs2_control_func()
694 if (ls->ls_recover_result[i] != LM_RD_SUCCESS) in gfs2_control_func()
697 ls->ls_recover_result[i] = 0; in gfs2_control_func()
699 if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) in gfs2_control_func()
702 __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
712 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
714 if (ls->ls_recover_submit[i] < lvb_gen) in gfs2_control_func()
715 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
722 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
724 if (ls->ls_recover_submit[i] < start_gen) { in gfs2_control_func()
725 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
726 __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
737 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
740 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in gfs2_control_func()
760 if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { in gfs2_control_func()
776 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
777 if (ls->ls_recover_block == block_gen && in gfs2_control_func()
778 ls->ls_recover_start == start_gen) { in gfs2_control_func()
779 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gfs2_control_func()
780 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
785 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
786 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
792 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_mount() local
798 memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
799 memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
800 memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); in control_mount()
801 ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; in control_mount()
802 init_completion(&ls->ls_sync_wait); in control_mount()
804 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
898 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in control_mount()
909 spin_lock(&ls->ls_recover_spin); in control_mount()
910 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
911 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
912 set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_mount()
913 spin_unlock(&ls->ls_recover_spin); in control_mount()
928 if (!all_jid_bits_clear(ls->ls_lvb_bits)) { in control_mount()
934 spin_lock(&ls->ls_recover_spin); in control_mount()
935 block_gen = ls->ls_recover_block; in control_mount()
936 start_gen = ls->ls_recover_start; in control_mount()
937 mount_gen = ls->ls_recover_mount; in control_mount()
950 ls->ls_recover_flags); in control_mount()
952 spin_unlock(&ls->ls_recover_spin); in control_mount()
961 lvb_gen, ls->ls_recover_flags); in control_mount()
962 spin_unlock(&ls->ls_recover_spin); in control_mount()
970 lvb_gen, ls->ls_recover_flags); in control_mount()
971 spin_unlock(&ls->ls_recover_spin); in control_mount()
975 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
976 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
977 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
978 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
979 spin_unlock(&ls->ls_recover_spin); in control_mount()
990 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_first_done() local
995 spin_lock(&ls->ls_recover_spin); in control_first_done()
996 start_gen = ls->ls_recover_start; in control_first_done()
997 block_gen = ls->ls_recover_block; in control_first_done()
999 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || in control_first_done()
1000 !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in control_first_done()
1001 !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in control_first_done()
1004 start_gen, block_gen, ls->ls_recover_flags); in control_first_done()
1005 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1018 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1021 wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, in control_first_done()
1026 clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_first_done()
1027 set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); in control_first_done()
1028 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1029 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1030 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1032 memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); in control_first_done()
1033 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in control_first_done()
1057 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in set_recover_size() local
1063 if (!ls->ls_lvb_bits) { in set_recover_size()
1064 ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in set_recover_size()
1065 if (!ls->ls_lvb_bits) in set_recover_size()
1075 old_size = ls->ls_recover_size; in set_recover_size()
1090 spin_lock(&ls->ls_recover_spin); in set_recover_size()
1091 memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); in set_recover_size()
1092 memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); in set_recover_size()
1093 kfree(ls->ls_recover_submit); in set_recover_size()
1094 kfree(ls->ls_recover_result); in set_recover_size()
1095 ls->ls_recover_submit = submit; in set_recover_size()
1096 ls->ls_recover_result = result; in set_recover_size()
1097 ls->ls_recover_size = new_size; in set_recover_size()
1098 spin_unlock(&ls->ls_recover_spin); in set_recover_size()
1102 static void free_recover_size(struct lm_lockstruct *ls) in free_recover_size() argument
1104 kfree(ls->ls_lvb_bits); in free_recover_size()
1105 kfree(ls->ls_recover_submit); in free_recover_size()
1106 kfree(ls->ls_recover_result); in free_recover_size()
1107 ls->ls_recover_submit = NULL; in free_recover_size()
1108 ls->ls_recover_result = NULL; in free_recover_size()
1109 ls->ls_recover_size = 0; in free_recover_size()
1110 ls->ls_lvb_bits = NULL; in free_recover_size()
1118 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_prep() local
1124 spin_lock(&ls->ls_recover_spin); in gdlm_recover_prep()
1125 ls->ls_recover_block = ls->ls_recover_start; in gdlm_recover_prep()
1126 set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_prep()
1128 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gdlm_recover_prep()
1129 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recover_prep()
1130 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1133 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gdlm_recover_prep()
1134 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1143 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_slot() local
1151 spin_lock(&ls->ls_recover_spin); in gdlm_recover_slot()
1152 if (ls->ls_recover_size < jid + 1) { in gdlm_recover_slot()
1154 jid, ls->ls_recover_block, ls->ls_recover_size); in gdlm_recover_slot()
1155 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1159 if (ls->ls_recover_submit[jid]) { in gdlm_recover_slot()
1161 jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); in gdlm_recover_slot()
1163 ls->ls_recover_submit[jid] = ls->ls_recover_block; in gdlm_recover_slot()
1164 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1173 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_done() local
1179 /* ensure the ls jid arrays are large enough */ in gdlm_recover_done()
1182 spin_lock(&ls->ls_recover_spin); in gdlm_recover_done()
1183 ls->ls_recover_start = generation; in gdlm_recover_done()
1185 if (!ls->ls_recover_mount) { in gdlm_recover_done()
1186 ls->ls_recover_mount = generation; in gdlm_recover_done()
1187 ls->ls_jid = our_slot - 1; in gdlm_recover_done()
1190 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recover_done()
1193 clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_done()
1195 wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); in gdlm_recover_done()
1196 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_done()
1204 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recovery_result() local
1211 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_recovery_result()
1215 if (jid == ls->ls_jid) in gdlm_recovery_result()
1218 spin_lock(&ls->ls_recover_spin); in gdlm_recovery_result()
1219 if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recovery_result()
1220 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1223 if (ls->ls_recover_size < jid + 1) { in gdlm_recovery_result()
1225 jid, ls->ls_recover_size); in gdlm_recovery_result()
1226 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1233 ls->ls_recover_result[jid] = result; in gdlm_recovery_result()
1239 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recovery_result()
1242 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1253 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_mount() local
1264 spin_lock_init(&ls->ls_recover_spin); in gdlm_mount()
1265 ls->ls_recover_flags = 0; in gdlm_mount()
1266 ls->ls_recover_mount = 0; in gdlm_mount()
1267 ls->ls_recover_start = 0; in gdlm_mount()
1268 ls->ls_recover_block = 0; in gdlm_mount()
1269 ls->ls_recover_size = 0; in gdlm_mount()
1270 ls->ls_recover_submit = NULL; in gdlm_mount()
1271 ls->ls_recover_result = NULL; in gdlm_mount()
1272 ls->ls_lvb_bits = NULL; in gdlm_mount()
1300 &ls->ls_dlm); in gdlm_mount()
1312 free_recover_size(ls); in gdlm_mount()
1313 set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); in gdlm_mount()
1334 ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in gdlm_mount()
1341 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_mount()
1343 free_recover_size(ls); in gdlm_mount()
1350 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_first_done() local
1353 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_first_done()
1363 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_unmount() local
1365 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_unmount()
1370 spin_lock(&ls->ls_recover_spin); in gdlm_unmount()
1371 set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); in gdlm_unmount()
1372 spin_unlock(&ls->ls_recover_spin); in gdlm_unmount()
1377 if (ls->ls_dlm) { in gdlm_unmount()
1378 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_unmount()
1379 ls->ls_dlm = NULL; in gdlm_unmount()
1382 free_recover_size(ls); in gdlm_unmount()
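
All of the hits above come from GFS2's DLM locking glue (the gdlm_* and control_* functions), and they repeat one access pattern: obtain ls from sdp->sd_lockstruct (or from gl->gl_name.ln_sbd->sd_lockstruct), snapshot ls->ls_recover_block and ls->ls_recover_start under ls->ls_recover_spin, drop the lock while doing the work, then re-take the lock and re-check the generations before committing (see the gfs2_control_func() hits around lines 622-786). The sketch below is a minimal user-space analogue of that snapshot-and-recheck idiom; the names (struct recover_state, do_recovery_work, control_func) are simplified stand-ins chosen for illustration, and a pthread mutex stands in for the kernel spinlock. It is not the kernel API, only the locking pattern the matches show.

/* Simplified user-space analogue of the generation snapshot-and-recheck
 * pattern visible in the gfs2_control_func() matches above.  All names
 * here are illustrative stand-ins, not the kernel's lm_lockstruct API. */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct recover_state {
	pthread_mutex_t lock;      /* user-space stand-in for ls_recover_spin */
	uint32_t recover_block;    /* stand-in for ls_recover_block */
	uint32_t recover_start;    /* stand-in for ls_recover_start */
};

static void do_recovery_work(uint32_t start_gen)
{
	/* placeholder for the work done while the lock is dropped */
	printf("working against generation %" PRIu32 "\n", start_gen);
}

static void control_func(struct recover_state *st)
{
	uint32_t block_gen, start_gen;

	/* 1. snapshot the generations under the lock */
	pthread_mutex_lock(&st->lock);
	block_gen = st->recover_block;
	start_gen = st->recover_start;
	pthread_mutex_unlock(&st->lock);

	/* 2. do the potentially slow work with the lock dropped */
	do_recovery_work(start_gen);

	/* 3. re-take the lock and only commit if nothing changed meanwhile
	 *    (the kernel code clears DFL_BLOCK_LOCKS only in this case) */
	pthread_mutex_lock(&st->lock);
	if (st->recover_block == block_gen && st->recover_start == start_gen) {
		printf("generations unchanged: commit\n");
	} else {
		printf("generations moved (%" PRIu32 "/%" PRIu32
		       " -> %" PRIu32 "/%" PRIu32 "): retry later\n",
		       block_gen, start_gen,
		       st->recover_block, st->recover_start);
	}
	pthread_mutex_unlock(&st->lock);
}

int main(void)
{
	struct recover_state st = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.recover_block = 1,
		.recover_start = 2,
	};

	control_func(&st);
	return 0;
}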