Lines Matching +full:inactive +full:- +full:delay +full:- +full:ms
1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
52 L: send_xxxx() -> R: receive_xxxx()
54 L: receive_xxxx_reply() <- R: send_xxxx_reply()
89 struct dlm_message *ms);
90 static int receive_extralen(struct dlm_message *ms);
96 * Lock compatibility matrix - thanks Steve
121 * -1 = nothing happens to the LVB
126 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
127 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
128 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
129 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
130 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
131 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
132 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
133 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
137 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
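
The rows above are the dlm_lvb_operations table: granted mode picks the row, requested mode the column, and the +1 offset leaves slot 0 for the unlocked/invalid mode; line 137 is the body of the modes_compat() macro doing the analogous lookup in the compatibility matrix. A minimal standalone sketch of the LVB lookup; the mode ordering (IV = -1, then NL, CR, CW, PR, PW, EX) is assumed to match the usual DLM constants:

    #include <stdio.h>

    /* Assumed DLM mode ordering: IV = -1, then NL..EX. */
    enum { MODE_IV = -1, MODE_NL, MODE_CR, MODE_CW, MODE_PR, MODE_PW, MODE_EX };

    /* dlm_lvb_operations, transcribed from the rows above:
     *  1 = LVB is returned to the caller
     *  0 = LVB is written to the resource
     * -1 = nothing happens to the LVB */
    static const int lvb_ops[8][8] = {
        { -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        { -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        { -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        { -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        { -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        { -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        { -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        { -1,  0,  0,  0,  0,  0,  0,  0 }, /* PD */
    };

    int main(void)
    {
        int grmode = MODE_PW, rqmode = MODE_PR;  /* down-convert PW -> PR */

        switch (lvb_ops[grmode + 1][rqmode + 1]) {
        case 1:  puts("LVB returned to caller");  break;
        case 0:  puts("LVB written to resource"); break;
        default: puts("LVB untouched");           break;
        }
        return 0;
    }
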
166 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, in dlm_print_lkb()
167 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode, in dlm_print_lkb()
168 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, in dlm_print_lkb()
169 (unsigned long long)lkb->lkb_recover_seq); in dlm_print_lkb()
176 r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in dlm_print_rsb()
177 r->res_flags, r->res_first_lkid, r->res_recover_locks_count, in dlm_print_rsb()
178 r->res_name); in dlm_print_rsb()
188 list_empty(&r->res_root_list), list_empty(&r->res_recover_list)); in dlm_dump_rsb()
190 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) in dlm_dump_rsb()
193 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) in dlm_dump_rsb()
196 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) in dlm_dump_rsb()
199 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) in dlm_dump_rsb()
207 down_read(&ls->ls_in_recovery); in dlm_lock_recovery()
212 up_read(&ls->ls_in_recovery); in dlm_unlock_recovery()
217 return down_read_trylock(&ls->ls_in_recovery); in dlm_lock_recovery_try()
222 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE); in can_be_queued()
227 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST); in force_blocking_asts()
232 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED); in is_demoted()
237 return (lkb->lkb_sbflags & DLM_SBF_ALTMODE); in is_altmode()
242 return (lkb->lkb_status == DLM_LKSTS_GRANTED); in is_granted()
247 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r);); in is_remote()
248 return !!r->res_nodeid; in is_remote()
253 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY)); in is_process_copy()
258 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0; in is_master_copy()
263 if ((lkb->lkb_grmode == DLM_LOCK_PR && lkb->lkb_rqmode == DLM_LOCK_CW) || in middle_conversion()
264 (lkb->lkb_rqmode == DLM_LOCK_PR && lkb->lkb_grmode == DLM_LOCK_CW)) in middle_conversion()
271 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode); in down_conversion()
276 return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK; in is_overlap_unlock()
281 return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL; in is_overlap_cancel()
286 return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK | in is_overlap()
297 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb);); in queue_cast()
300 /* if the operation was a cancel, then return -DLM_ECANCEL; if a in queue_cast()
301 timeout caused the cancel, then return -ETIMEDOUT */ in queue_cast()
302 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) { in queue_cast()
303 lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL; in queue_cast()
304 rv = -ETIMEDOUT; in queue_cast()
308 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) { in queue_cast()
309 lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL; in queue_cast()
310 rv = -EDEADLK; in queue_cast()
313 dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags); in queue_cast()
319 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL); in queue_cast_overlap()
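
The two branches above rewrite the generic -DLM_ECANCEL completion status as -ETIMEDOUT or -EDEADLK when internal flags record why the cancel happened, then clear the one-shot flag before dlm_add_cb() runs. The same translation as a standalone sketch (the constant values below are illustrative, not the kernel's):

    #include <errno.h>
    #include <stdio.h>

    #define ECANCEL_STATUS      0x10002 /* stand-in for DLM_ECANCEL */
    #define IFL_TIMEOUT_CANCEL  0x1     /* illustrative flag bits */
    #define IFL_DEADLOCK_CANCEL 0x2

    /* Report *why* a cancel completed, then clear the one-shot flag. */
    static int cast_status(int rv, unsigned int *flags)
    {
        if (rv == -ECANCEL_STATUS && (*flags & IFL_TIMEOUT_CANCEL)) {
            *flags &= ~IFL_TIMEOUT_CANCEL;
            return -ETIMEDOUT;
        }
        if (rv == -ECANCEL_STATUS && (*flags & IFL_DEADLOCK_CANCEL)) {
            *flags &= ~IFL_DEADLOCK_CANCEL;
            return -EDEADLK;
        }
        return rv;
    }

    int main(void)
    {
        unsigned int flags = IFL_TIMEOUT_CANCEL;
        printf("%d\n", cast_status(-ECANCEL_STATUS, &flags)); /* -ETIMEDOUT */
        return 0;
    }
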
340 kref_get(&r->res_ref); in hold_rsb()
353 struct dlm_ls *ls = r->res_ls; in put_rsb()
354 uint32_t bucket = r->res_bucket; in put_rsb()
357 rv = kref_put_lock(&r->res_ref, toss_rsb, in put_rsb()
358 &ls->ls_rsbtbl[bucket].lock); in put_rsb()
360 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
373 spin_lock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
374 if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { in pre_rsb_struct()
375 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
378 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
383 spin_lock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
385 list_add(&r1->res_hashchain, &ls->ls_new_rsb); in pre_rsb_struct()
386 ls->ls_new_rsb_count++; in pre_rsb_struct()
389 list_add(&r2->res_hashchain, &ls->ls_new_rsb); in pre_rsb_struct()
390 ls->ls_new_rsb_count++; in pre_rsb_struct()
392 count = ls->ls_new_rsb_count; in pre_rsb_struct()
393 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
396 return -ENOMEM; in pre_rsb_struct()
400 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
410 spin_lock(&ls->ls_new_rsb_spin); in get_rsb_struct()
411 if (list_empty(&ls->ls_new_rsb)) { in get_rsb_struct()
412 count = ls->ls_new_rsb_count; in get_rsb_struct()
413 spin_unlock(&ls->ls_new_rsb_spin); in get_rsb_struct()
417 return -EAGAIN; in get_rsb_struct()
420 r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); in get_rsb_struct()
421 list_del(&r->res_hashchain); in get_rsb_struct()
423 memset(&r->res_hashnode, 0, sizeof(struct rb_node)); in get_rsb_struct()
424 ls->ls_new_rsb_count--; in get_rsb_struct()
425 spin_unlock(&ls->ls_new_rsb_spin); in get_rsb_struct()
427 r->res_ls = ls; in get_rsb_struct()
428 r->res_length = len; in get_rsb_struct()
429 memcpy(r->res_name, name, len); in get_rsb_struct()
430 mutex_init(&r->res_mutex); in get_rsb_struct()
432 INIT_LIST_HEAD(&r->res_lookup); in get_rsb_struct()
433 INIT_LIST_HEAD(&r->res_grantqueue); in get_rsb_struct()
434 INIT_LIST_HEAD(&r->res_convertqueue); in get_rsb_struct()
435 INIT_LIST_HEAD(&r->res_waitqueue); in get_rsb_struct()
436 INIT_LIST_HEAD(&r->res_root_list); in get_rsb_struct()
437 INIT_LIST_HEAD(&r->res_recover_list); in get_rsb_struct()
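
pre_rsb_struct() and get_rsb_struct() separate allocation from use: rsbs are allocated up front, while no spinlock is held, and parked on ls_new_rsb; later they are claimed under the rsb-table lock without allocating, and an empty stash yields -EAGAIN so the caller can unlock, refill, and retry. A userspace sketch of the pattern with a pthread mutex standing in for the spinlock (all names below are mine, not the kernel's):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t stash_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *stash;
    static int stash_count;

    /* May allocate (and, in the kernel, sleep): call with no locks held. */
    static int prealloc(int want)
    {
        for (;;) {
            pthread_mutex_lock(&stash_lock);
            if (stash_count >= want) {
                pthread_mutex_unlock(&stash_lock);
                return 0;
            }
            pthread_mutex_unlock(&stash_lock);

            /* allocate outside the lock, as pre_rsb_struct() does */
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return -ENOMEM;

            pthread_mutex_lock(&stash_lock);
            n->next = stash;
            stash = n;
            stash_count++;
            pthread_mutex_unlock(&stash_lock);
        }
    }

    /* Never allocates, so safe while other locks are held.  NULL plays
     * the role of -EAGAIN: drop locks, prealloc(), retry. */
    static struct node *get_prealloced(void)
    {
        struct node *n;

        pthread_mutex_lock(&stash_lock);
        n = stash;
        if (n) {
            stash = n->next;
            stash_count--;
        }
        pthread_mutex_unlock(&stash_lock);
        return n;
    }
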
449 return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); in rsb_cmp()
455 struct rb_node *node = tree->rb_node; in dlm_search_rsb_tree()
463 node = node->rb_left; in dlm_search_rsb_tree()
465 node = node->rb_right; in dlm_search_rsb_tree()
470 return -EBADR; in dlm_search_rsb_tree()
479 struct rb_node **newn = &tree->rb_node; in rsb_insert()
488 rc = rsb_cmp(cur, rsb->res_name, rsb->res_length); in rsb_insert()
490 newn = &parent->rb_left; in rsb_insert()
492 newn = &parent->rb_right; in rsb_insert()
497 return -EEXIST; in rsb_insert()
501 rb_link_node(&rsb->res_hashnode, parent, newn); in rsb_insert()
502 rb_insert_color(&rsb->res_hashnode, tree); in rsb_insert()
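
dlm_search_rsb_tree() and rsb_insert() walk an rbtree keyed by rsb_cmp() (line 449), which zero-pads the probe name to DLM_RESNAME_MAXLEN so that names of different lengths compare consistently. The same walk sketched over a plain unbalanced BST, keeping the kernel's branch direction (cmp < 0 descends left); the size constant and names are stand-ins:

    #include <stdlib.h>
    #include <string.h>

    #define NAME_MAX_LEN 64 /* stand-in for DLM_RESNAME_MAXLEN */

    struct rsb {
        char name[NAME_MAX_LEN];  /* zero-padded, like res_name */
        struct rsb *left, *right;
    };

    /* rsb_cmp(): compare against a zero-padded copy of the probe
     * (len <= NAME_MAX_LEN assumed). */
    static int name_cmp(const struct rsb *r, const char *name, size_t len)
    {
        char maxname[NAME_MAX_LEN] = {0};

        memcpy(maxname, name, len);
        return memcmp(r->name, maxname, NAME_MAX_LEN);
    }

    /* Search, optionally inserting on a miss; rc < 0 descends left and
     * rc > 0 right, matching the listing above. */
    static struct rsb *tree_lookup(struct rsb **root, const char *name,
                                   size_t len, int insert)
    {
        while (*root) {
            int rc = name_cmp(*root, name, len);

            if (rc == 0)
                return *root;
            root = (rc < 0) ? &(*root)->left : &(*root)->right;
        }
        if (!insert)
            return NULL;   /* the kernel search returns -EBADR here */
        *root = calloc(1, sizeof(struct rsb));
        if (*root)
            memcpy((*root)->name, name, len);
        return *root;
    }
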
513 * to excessive master lookups and removals if we don't delay the release.
523 * - previously used locally but not any more (were on keep list, then
525 * - created and put on toss list as a directory record for a lookup
599 spin_lock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
601 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in find_rsb_dir()
609 kref_get(&r->res_ref); in find_rsb_dir()
614 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in find_rsb_dir()
619 * rsb found inactive (master_nodeid may be out of date unless in find_rsb_dir()
625 if ((r->res_master_nodeid != our_nodeid) && from_other) { in find_rsb_dir()
629 from_nodeid, r->res_master_nodeid, dir_nodeid, in find_rsb_dir()
630 r->res_name); in find_rsb_dir()
631 error = -ENOTBLK; in find_rsb_dir()
635 if ((r->res_master_nodeid != our_nodeid) && from_dir) { in find_rsb_dir()
638 from_nodeid, r->res_master_nodeid); in find_rsb_dir()
641 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
642 r->res_nodeid = 0; in find_rsb_dir()
644 r->res_first_lkid = 0; in find_rsb_dir()
647 if (from_local && (r->res_master_nodeid != our_nodeid)) { in find_rsb_dir()
651 r->res_first_lkid = 0; in find_rsb_dir()
654 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in find_rsb_dir()
655 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_dir()
664 if (error == -EBADR && !create) in find_rsb_dir()
668 if (error == -EAGAIN) { in find_rsb_dir()
669 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
675 r->res_hash = hash; in find_rsb_dir()
676 r->res_bucket = b; in find_rsb_dir()
677 r->res_dir_nodeid = dir_nodeid; in find_rsb_dir()
678 kref_init(&r->res_ref); in find_rsb_dir()
683 from_nodeid, r->res_name); in find_rsb_dir()
684 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
685 r->res_nodeid = 0; in find_rsb_dir()
692 from_nodeid, dir_nodeid, our_nodeid, r->res_name); in find_rsb_dir()
695 error = -ENOTBLK; in find_rsb_dir()
701 from_nodeid, dir_nodeid, r->res_name); in find_rsb_dir()
707 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
708 r->res_nodeid = 0; in find_rsb_dir()
711 r->res_master_nodeid = 0; in find_rsb_dir()
712 r->res_nodeid = -1; in find_rsb_dir()
716 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_dir()
718 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
743 spin_lock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
745 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in find_rsb_nodir()
753 kref_get(&r->res_ref); in find_rsb_nodir()
758 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in find_rsb_nodir()
763 * rsb found inactive. No other thread is using this rsb because in find_rsb_nodir()
768 if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) { in find_rsb_nodir()
772 from_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
774 error = -ENOTBLK; in find_rsb_nodir()
778 if (!recover && (r->res_master_nodeid != our_nodeid) && in find_rsb_nodir()
783 our_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
785 r->res_master_nodeid = our_nodeid; in find_rsb_nodir()
786 r->res_nodeid = 0; in find_rsb_nodir()
789 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in find_rsb_nodir()
790 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_nodir()
800 if (error == -EAGAIN) { in find_rsb_nodir()
801 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
807 r->res_hash = hash; in find_rsb_nodir()
808 r->res_bucket = b; in find_rsb_nodir()
809 r->res_dir_nodeid = dir_nodeid; in find_rsb_nodir()
810 r->res_master_nodeid = dir_nodeid; in find_rsb_nodir()
811 r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; in find_rsb_nodir()
812 kref_init(&r->res_ref); in find_rsb_nodir()
814 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_nodir()
816 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
830 return -EINVAL; in find_rsb()
833 b = hash & (ls->ls_rsbtbl_size - 1); in find_rsb()
853 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
854 r->res_dir_nodeid); in validate_master_nodeid()
856 return -ENOTBLK; in validate_master_nodeid()
859 if (from_nodeid != r->res_dir_nodeid) { in validate_master_nodeid()
862 master_nodeid is zero, so limit debug to non-zero. */ in validate_master_nodeid()
864 if (r->res_master_nodeid) { in validate_master_nodeid()
867 r->res_master_nodeid, r->res_dir_nodeid, in validate_master_nodeid()
868 r->res_first_lkid, r->res_name); in validate_master_nodeid()
870 return -ENOTBLK; in validate_master_nodeid()
873 request; this could happen with master 0 / res_nodeid -1 */ in validate_master_nodeid()
875 if (r->res_master_nodeid) { in validate_master_nodeid()
878 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
879 r->res_first_lkid, r->res_name); in validate_master_nodeid()
882 r->res_master_nodeid = dlm_our_nodeid(); in validate_master_nodeid()
883 r->res_nodeid = 0; in validate_master_nodeid()
895 if (r->res_dir_nodeid != our_nodeid) { in __dlm_master_lookup()
898 r->res_dir_nodeid, our_nodeid, r->res_name); in __dlm_master_lookup()
899 r->res_dir_nodeid = our_nodeid; in __dlm_master_lookup()
902 if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) { in __dlm_master_lookup()
909 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
910 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
920 if (from_master && (r->res_master_nodeid != from_nodeid)) { in __dlm_master_lookup()
927 __func__, from_nodeid, r->res_master_nodeid, in __dlm_master_lookup()
928 r->res_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
930 if (r->res_master_nodeid == our_nodeid) { in __dlm_master_lookup()
936 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
937 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
941 if (!r->res_master_nodeid) { in __dlm_master_lookup()
947 from_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
948 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
949 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
953 (r->res_master_nodeid == from_nodeid)) { in __dlm_master_lookup()
960 __func__, from_nodeid, flags, r->res_first_lkid, in __dlm_master_lookup()
961 r->res_name); in __dlm_master_lookup()
965 *r_nodeid = r->res_master_nodeid; in __dlm_master_lookup()
1008 return -EINVAL; in dlm_master_lookup()
1013 return -EINVAL; in dlm_master_lookup()
1017 b = hash & (ls->ls_rsbtbl_size - 1); in dlm_master_lookup()
1023 ls->ls_num_nodes); in dlm_master_lookup()
1024 *r_nodeid = -1; in dlm_master_lookup()
1025 return -EINVAL; in dlm_master_lookup()
1033 spin_lock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1034 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in dlm_master_lookup()
1041 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1054 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in dlm_master_lookup()
1058 /* because the rsb is inactive (on toss list), it's not refcounted in dlm_master_lookup()
1065 r->res_toss_time = jiffies; in dlm_master_lookup()
1066 /* the rsb was inactive (on toss list) */ in dlm_master_lookup()
1067 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1073 if (error == -EAGAIN) { in dlm_master_lookup()
1074 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1080 r->res_hash = hash; in dlm_master_lookup()
1081 r->res_bucket = b; in dlm_master_lookup()
1082 r->res_dir_nodeid = our_nodeid; in dlm_master_lookup()
1083 r->res_master_nodeid = from_nodeid; in dlm_master_lookup()
1084 r->res_nodeid = from_nodeid; in dlm_master_lookup()
1085 kref_init(&r->res_ref); in dlm_master_lookup()
1086 r->res_toss_time = jiffies; in dlm_master_lookup()
1088 error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); in dlm_master_lookup()
1092 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1100 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1110 for (i = 0; i < ls->ls_rsbtbl_size; i++) { in dlm_dump_rsb_hash()
1111 spin_lock(&ls->ls_rsbtbl[i].lock); in dlm_dump_rsb_hash()
1112 for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { in dlm_dump_rsb_hash()
1114 if (r->res_hash == hash) in dlm_dump_rsb_hash()
1117 spin_unlock(&ls->ls_rsbtbl[i].lock); in dlm_dump_rsb_hash()
1128 b = hash & (ls->ls_rsbtbl_size - 1); in dlm_dump_rsb_name()
1130 spin_lock(&ls->ls_rsbtbl[b].lock); in dlm_dump_rsb_name()
1131 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in dlm_dump_rsb_name()
1135 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in dlm_dump_rsb_name()
1141 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_dump_rsb_name()
1147 struct dlm_ls *ls = r->res_ls; in toss_rsb()
1149 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); in toss_rsb()
1150 kref_init(&r->res_ref); in toss_rsb()
1151 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); in toss_rsb()
1152 rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); in toss_rsb()
1153 r->res_toss_time = jiffies; in toss_rsb()
1154 ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK; in toss_rsb()
1155 if (r->res_lvbptr) { in toss_rsb()
1156 dlm_free_lvb(r->res_lvbptr); in toss_rsb()
1157 r->res_lvbptr = NULL; in toss_rsb()
1166 rv = kref_put(&r->res_ref, toss_rsb); in unhold_rsb()
1177 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); in kill_rsb()
1178 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); in kill_rsb()
1179 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); in kill_rsb()
1180 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); in kill_rsb()
1181 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); in kill_rsb()
1182 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); in kill_rsb()
1191 lkb->lkb_resource = r; in attach_lkb()
1196 if (lkb->lkb_resource) { in detach_lkb()
1197 put_rsb(lkb->lkb_resource); in detach_lkb()
1198 lkb->lkb_resource = NULL; in detach_lkb()
1210 return -ENOMEM; in _create_lkb()
1212 lkb->lkb_nodeid = -1; in _create_lkb()
1213 lkb->lkb_grmode = DLM_LOCK_IV; in _create_lkb()
1214 kref_init(&lkb->lkb_ref); in _create_lkb()
1215 INIT_LIST_HEAD(&lkb->lkb_ownqueue); in _create_lkb()
1216 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); in _create_lkb()
1218 INIT_LIST_HEAD(&lkb->lkb_time_list); in _create_lkb()
1220 INIT_LIST_HEAD(&lkb->lkb_cb_list); in _create_lkb()
1221 mutex_init(&lkb->lkb_cb_mutex); in _create_lkb()
1222 INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); in _create_lkb()
1225 spin_lock(&ls->ls_lkbidr_spin); in _create_lkb()
1226 rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); in _create_lkb()
1228 lkb->lkb_id = rv; in _create_lkb()
1229 spin_unlock(&ls->ls_lkbidr_spin); in _create_lkb()
1251 spin_lock(&ls->ls_lkbidr_spin); in find_lkb()
1252 lkb = idr_find(&ls->ls_lkbidr, lkid); in find_lkb()
1254 kref_get(&lkb->lkb_ref); in find_lkb()
1255 spin_unlock(&ls->ls_lkbidr_spin); in find_lkb()
1258 return lkb ? 0 : -ENOENT; in find_lkb()
1268 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in kill_lkb()
1276 uint32_t lkid = lkb->lkb_id; in __put_lkb()
1279 rv = kref_put_lock(&lkb->lkb_ref, kill_lkb, in __put_lkb()
1280 &ls->ls_lkbidr_spin); in __put_lkb()
1282 idr_remove(&ls->ls_lkbidr, lkid); in __put_lkb()
1283 spin_unlock(&ls->ls_lkbidr_spin); in __put_lkb()
1288 if (lkb->lkb_lvbptr && is_master_copy(lkb)) in __put_lkb()
1289 dlm_free_lvb(lkb->lkb_lvbptr); in __put_lkb()
1300 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb);); in dlm_put_lkb()
1301 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb);); in dlm_put_lkb()
1303 ls = lkb->lkb_resource->res_ls; in dlm_put_lkb()
1312 kref_get(&lkb->lkb_ref); in hold_lkb()
1329 kref_put(&lkb->lkb_ref, unhold_lkb_assert); in unhold_lkb()
1338 if (iter->lkb_rqmode < mode) { in lkb_add_ordered()
1340 list_add_tail(new, &iter->lkb_statequeue); in lkb_add_ordered()
1352 kref_get(&lkb->lkb_ref); in add_lkb()
1354 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in add_lkb()
1356 lkb->lkb_timestamp = ktime_get(); in add_lkb()
1358 lkb->lkb_status = status; in add_lkb()
1362 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1363 list_add(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1365 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1369 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue, in add_lkb()
1370 lkb->lkb_grmode); in add_lkb()
1373 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1374 list_add(&lkb->lkb_statequeue, &r->res_convertqueue); in add_lkb()
1376 list_add_tail(&lkb->lkb_statequeue, in add_lkb()
1377 &r->res_convertqueue); in add_lkb()
1386 lkb->lkb_status = 0; in del_lkb()
1387 list_del(&lkb->lkb_statequeue); in del_lkb()
1413 return -1; in msg_reply_type()
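
Line 1413 is the fall-through of msg_reply_type(), the helper that pairs each outgoing request type with the reply type expected back, per the send_xxxx()/receive_xxxx_reply() scheme at the top of the file. A hedged reconstruction; the enum values are stand-ins, only the pairing matters:

    /* Illustrative stand-ins for the DLM_MSG_* constants. */
    enum {
        MSG_REQUEST = 1, MSG_CONVERT, MSG_UNLOCK, MSG_CANCEL, MSG_LOOKUP,
        MSG_REQUEST_REPLY, MSG_CONVERT_REPLY, MSG_UNLOCK_REPLY,
        MSG_CANCEL_REPLY, MSG_LOOKUP_REPLY,
    };

    static int msg_reply_type(int mstype)
    {
        switch (mstype) {
        case MSG_REQUEST: return MSG_REQUEST_REPLY;
        case MSG_CONVERT: return MSG_CONVERT_REPLY;
        case MSG_UNLOCK:  return MSG_UNLOCK_REPLY;
        case MSG_CANCEL:  return MSG_CANCEL_REPLY;
        case MSG_LOOKUP:  return MSG_LOOKUP_REPLY;
        }
        return -1;        /* not a message that gets a reply */
    }
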
1421 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in add_to_waiters()
1424 mutex_lock(&ls->ls_waiters_mutex); in add_to_waiters()
1428 error = -EINVAL; in add_to_waiters()
1432 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) { in add_to_waiters()
1435 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; in add_to_waiters()
1438 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL; in add_to_waiters()
1441 error = -EBUSY; in add_to_waiters()
1444 lkb->lkb_wait_count++; in add_to_waiters()
1448 lkb->lkb_id, lkb->lkb_wait_type, mstype, in add_to_waiters()
1449 lkb->lkb_wait_count, lkb->lkb_flags); in add_to_waiters()
1453 DLM_ASSERT(!lkb->lkb_wait_count, in add_to_waiters()
1455 printk("wait_count %d\n", lkb->lkb_wait_count);); in add_to_waiters()
1457 lkb->lkb_wait_count++; in add_to_waiters()
1458 lkb->lkb_wait_type = mstype; in add_to_waiters()
1459 lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */ in add_to_waiters()
1461 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); in add_to_waiters()
1465 lkb->lkb_id, error, lkb->lkb_flags, mstype, in add_to_waiters()
1466 lkb->lkb_wait_type, lkb->lkb_resource->res_name); in add_to_waiters()
1467 mutex_unlock(&ls->ls_waiters_mutex); in add_to_waiters()
1477 struct dlm_message *ms) in _remove_from_waiters() argument
1479 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in _remove_from_waiters()
1483 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1484 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; in _remove_from_waiters()
1490 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1491 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in _remove_from_waiters()
1500 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) { in _remove_from_waiters()
1502 lkb->lkb_id, lkb->lkb_wait_type); in _remove_from_waiters()
1503 return -1; in _remove_from_waiters()
1512 lingering state of the cancel and fail with -EBUSY. */ in _remove_from_waiters()
1515 (lkb->lkb_wait_type == DLM_MSG_CONVERT) && in _remove_from_waiters()
1516 is_overlap_cancel(lkb) && ms && !ms->m_result) { in _remove_from_waiters()
1518 lkb->lkb_id); in _remove_from_waiters()
1519 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1520 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in _remove_from_waiters()
1521 lkb->lkb_wait_count--; in _remove_from_waiters()
1527 msg due to lookup->request optimization, verify others? */ in _remove_from_waiters()
1529 if (lkb->lkb_wait_type) { in _remove_from_waiters()
1530 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1535 lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0, in _remove_from_waiters()
1536 lkb->lkb_remid, mstype, lkb->lkb_flags); in _remove_from_waiters()
1537 return -1; in _remove_from_waiters()
1540 /* the force-unlock/cancel has completed and we haven't recvd a reply in _remove_from_waiters()
1545 if (overlap_done && lkb->lkb_wait_type) { in _remove_from_waiters()
1547 lkb->lkb_id, mstype, lkb->lkb_wait_type); in _remove_from_waiters()
1548 lkb->lkb_wait_count--; in _remove_from_waiters()
1550 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1553 DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb);); in _remove_from_waiters()
1555 lkb->lkb_flags &= ~DLM_IFL_RESEND; in _remove_from_waiters()
1556 lkb->lkb_wait_count--; in _remove_from_waiters()
1557 if (!lkb->lkb_wait_count) in _remove_from_waiters()
1558 list_del_init(&lkb->lkb_wait_reply); in _remove_from_waiters()
1565 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters()
1568 mutex_lock(&ls->ls_waiters_mutex); in remove_from_waiters()
1570 mutex_unlock(&ls->ls_waiters_mutex); in remove_from_waiters()
1577 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms) in remove_from_waiters_ms() argument
1579 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters_ms()
1582 if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS)) in remove_from_waiters_ms()
1583 mutex_lock(&ls->ls_waiters_mutex); in remove_from_waiters_ms()
1584 error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); in remove_from_waiters_ms()
1585 if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS)) in remove_from_waiters_ms()
1586 mutex_unlock(&ls->ls_waiters_mutex); in remove_from_waiters_ms()
1595 (ls->ls_remove_len && \
1596 !rsb_cmp(r, ls->ls_remove_name, \
1597 ls->ls_remove_len))
1601 struct dlm_ls *ls = r->res_ls; in wait_pending_remove()
1603 spin_lock(&ls->ls_remove_spin); in wait_pending_remove()
1605 log_debug(ls, "delay lookup for remove dir %d %s", in wait_pending_remove()
1606 r->res_dir_nodeid, r->res_name); in wait_pending_remove()
1607 spin_unlock(&ls->ls_remove_spin); in wait_pending_remove()
1608 wait_event(ls->ls_remove_wait, !DLM_WAIT_PENDING_COND(ls, r)); in wait_pending_remove()
1611 spin_unlock(&ls->ls_remove_spin); in wait_pending_remove()
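
wait_pending_remove() closes a race between a lookup and an in-flight directory remove for the same name: the remover (see shrink_bucket() and send_repeat_remove() below) publishes the name in ls_remove_name under ls_remove_spin before sending DLM_MSG_REMOVE, then clears it and wakes ls_remove_wait. The same publish/wait/clear handshake sketched with a condition variable (userspace stand-ins for the kernel primitives):

    #include <pthread.h>
    #include <string.h>

    #define RESNAME_MAX 64

    static pthread_mutex_t remove_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  remove_clear = PTHREAD_COND_INITIALIZER;
    static char remove_name[RESNAME_MAX];
    static int  remove_len;  /* nonzero => a remove is in flight */

    /* Looker: delay the lookup while this exact name is being removed. */
    static void wait_pending_remove(const char *name, int len)
    {
        pthread_mutex_lock(&remove_lock);
        while (remove_len && remove_len == len &&
               !memcmp(remove_name, name, len))
            pthread_cond_wait(&remove_clear, &remove_lock);
        pthread_mutex_unlock(&remove_lock);
    }

    /* Remover: publish before sending the remove message ... */
    static void publish_remove(const char *name, int len)
    {
        pthread_mutex_lock(&remove_lock);
        memcpy(remove_name, name, len);
        remove_len = len;
        pthread_mutex_unlock(&remove_lock);
    }

    /* ... then clear and wake everyone once it is on the wire. */
    static void clear_remove(void)
    {
        pthread_mutex_lock(&remove_lock);
        remove_len = 0;
        memset(remove_name, 0, sizeof(remove_name));
        pthread_cond_broadcast(&remove_clear);
        pthread_mutex_unlock(&remove_lock);
    }
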
1631 memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); in shrink_bucket()
1633 spin_lock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1635 if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) { in shrink_bucket()
1636 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1640 for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { in shrink_bucket()
1650 (r->res_master_nodeid != our_nodeid) && in shrink_bucket()
1657 if (!time_after_eq(jiffies, r->res_toss_time + in shrink_bucket()
1663 (r->res_master_nodeid == our_nodeid) && in shrink_bucket()
1670 ls->ls_remove_lens[remote_count] = r->res_length; in shrink_bucket()
1671 memcpy(ls->ls_remove_names[remote_count], r->res_name, in shrink_bucket()
1680 if (!kref_put(&r->res_ref, kill_rsb)) { in shrink_bucket()
1681 log_error(ls, "tossed rsb in use %s", r->res_name); in shrink_bucket()
1685 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in shrink_bucket()
1690 ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK; in shrink_bucket()
1692 ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK; in shrink_bucket()
1693 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1710 name = ls->ls_remove_names[i]; in shrink_bucket()
1711 len = ls->ls_remove_lens[i]; in shrink_bucket()
1713 spin_lock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1714 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in shrink_bucket()
1716 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1721 if (r->res_master_nodeid != our_nodeid) { in shrink_bucket()
1722 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1724 r->res_master_nodeid, r->res_dir_nodeid, in shrink_bucket()
1729 if (r->res_dir_nodeid == our_nodeid) { in shrink_bucket()
1731 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1733 r->res_dir_nodeid, r->res_master_nodeid, in shrink_bucket()
1738 if (!time_after_eq(jiffies, r->res_toss_time + in shrink_bucket()
1740 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1742 r->res_toss_time, jiffies, name); in shrink_bucket()
1746 if (!kref_put(&r->res_ref, kill_rsb)) { in shrink_bucket()
1747 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1752 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in shrink_bucket()
1755 spin_lock(&ls->ls_remove_spin); in shrink_bucket()
1756 ls->ls_remove_len = len; in shrink_bucket()
1757 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); in shrink_bucket()
1758 spin_unlock(&ls->ls_remove_spin); in shrink_bucket()
1759 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1764 spin_lock(&ls->ls_remove_spin); in shrink_bucket()
1765 ls->ls_remove_len = 0; in shrink_bucket()
1766 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); in shrink_bucket()
1767 spin_unlock(&ls->ls_remove_spin); in shrink_bucket()
1768 wake_up(&ls->ls_remove_wait); in shrink_bucket()
1778 for (i = 0; i < ls->ls_rsbtbl_size; i++) { in dlm_scan_rsbs()
1789 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in add_timeout()
1794 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) && in add_timeout()
1795 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { in add_timeout()
1796 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN; in add_timeout()
1799 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT) in add_timeout()
1804 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb);); in add_timeout()
1805 mutex_lock(&ls->ls_timeout_mutex); in add_timeout()
1807 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout); in add_timeout()
1808 mutex_unlock(&ls->ls_timeout_mutex); in add_timeout()
1813 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in del_timeout()
1815 mutex_lock(&ls->ls_timeout_mutex); in del_timeout()
1816 if (!list_empty(&lkb->lkb_time_list)) { in del_timeout()
1817 list_del_init(&lkb->lkb_time_list); in del_timeout()
1820 mutex_unlock(&ls->ls_timeout_mutex); in del_timeout()
1826 to specify some special timeout-related bits in the lkb that are just to
1842 mutex_lock(&ls->ls_timeout_mutex); in dlm_scan_timeout()
1843 list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) { in dlm_scan_timeout()
1846 iter->lkb_timestamp)); in dlm_scan_timeout()
1848 if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) && in dlm_scan_timeout()
1849 wait_us >= (iter->lkb_timeout_cs * 10000)) in dlm_scan_timeout()
1852 if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && in dlm_scan_timeout()
1862 mutex_unlock(&ls->ls_timeout_mutex); in dlm_scan_timeout()
1867 r = lkb->lkb_resource; in dlm_scan_timeout()
1873 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN; in dlm_scan_timeout()
1874 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT)) in dlm_scan_timeout()
1881 lkb->lkb_id, lkb->lkb_nodeid, r->res_name); in dlm_scan_timeout()
1882 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN; in dlm_scan_timeout()
1883 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL; in dlm_scan_timeout()
1900 u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin); in dlm_adjust_timeouts()
1902 ls->ls_recover_begin = 0; in dlm_adjust_timeouts()
1903 mutex_lock(&ls->ls_timeout_mutex); in dlm_adjust_timeouts()
1904 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) in dlm_adjust_timeouts()
1905 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us); in dlm_adjust_timeouts()
1906 mutex_unlock(&ls->ls_timeout_mutex); in dlm_adjust_timeouts()
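
Two easy-to-misread details in the timeout code above: lkb_timeout_cs is in centiseconds while the elapsed time is measured in microseconds, hence the * 10000; and dlm_adjust_timeouts() pushes every waiter's start stamp forward by the length of the recovery window so that time spent in recovery never counts toward a lock timeout. Both in miniature:

    #include <stdint.h>

    /* centiseconds -> microseconds: 1 cs = 10,000 us */
    static int lock_timed_out(uint64_t waited_us, uint16_t timeout_cs)
    {
        return waited_us >= (uint64_t)timeout_cs * 10000;
    }

    /* Exclude a recovery window from every pending lock's wait time. */
    static void adjust_timeouts(uint64_t *timestamps_us, int n,
                                uint64_t recovery_us)
    {
        for (int i = 0; i < n; i++)
            timestamps_us[i] += recovery_us;
    }
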
1917 int b, len = r->res_ls->ls_lvblen; in set_lvb_lock()
1921 b=-1 do nothing */ in set_lvb_lock()
1923 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock()
1926 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1929 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1932 if (!r->res_lvbptr) in set_lvb_lock()
1935 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len); in set_lvb_lock()
1936 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1939 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_lock()
1944 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1947 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1950 if (!r->res_lvbptr) in set_lvb_lock()
1951 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_lock()
1953 if (!r->res_lvbptr) in set_lvb_lock()
1956 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len); in set_lvb_lock()
1957 r->res_lvbseq++; in set_lvb_lock()
1958 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1963 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID; in set_lvb_lock()
1968 if (lkb->lkb_grmode < DLM_LOCK_PW) in set_lvb_unlock()
1971 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_unlock()
1976 if (!lkb->lkb_lvbptr) in set_lvb_unlock()
1979 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_unlock()
1982 if (!r->res_lvbptr) in set_lvb_unlock()
1983 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_unlock()
1985 if (!r->res_lvbptr) in set_lvb_unlock()
1988 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in set_lvb_unlock()
1989 r->res_lvbseq++; in set_lvb_unlock()
1996 struct dlm_message *ms) in set_lvb_lock_pc() argument
2000 if (!lkb->lkb_lvbptr) in set_lvb_lock_pc()
2003 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock_pc()
2006 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock_pc()
2008 int len = receive_extralen(ms); in set_lvb_lock_pc()
2009 if (len > r->res_ls->ls_lvblen) in set_lvb_lock_pc()
2010 len = r->res_ls->ls_lvblen; in set_lvb_lock_pc()
2011 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in set_lvb_lock_pc()
2012 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in set_lvb_lock_pc()
2017 remove_lock -- used for unlock, removes lkb from granted
2018 revert_lock -- used for cancel, moves lkb from convert to granted
2019 grant_lock -- used for request and convert, adds lkb to granted or
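
The three helpers named above (grant_lock also moves an lkb to the granted queue from the convert or waiting queue) are, at heart, queue moves on the rsb. Schematically, with statuses mirroring the DLM_LKSTS_* states and return conventions omitted (see the revert_lock() fragments just below):

    enum status { STS_NONE, STS_WAITING, STS_GRANTED, STS_CONVERT };
    enum { MODE_IV = -1 };

    struct lkb { enum status status; int grmode, rqmode; };

    /* unlock: drop the lock from the granted queue */
    static void remove_lock_s(struct lkb *lkb)
    {
        lkb->status = STS_NONE;
        lkb->grmode = MODE_IV;
    }

    /* cancel: a queued convert falls back to its old granted mode;
     * a never-granted request is removed outright */
    static void revert_lock_s(struct lkb *lkb)
    {
        lkb->rqmode = MODE_IV;
        if (lkb->status == STS_CONVERT)
            lkb->status = STS_GRANTED;
        else if (lkb->status == STS_WAITING)
            remove_lock_s(lkb);
    }

    /* request/convert granted: requested mode becomes the granted mode */
    static void grant_lock_s(struct lkb *lkb)
    {
        lkb->grmode = lkb->rqmode;
        lkb->rqmode = MODE_IV;
        lkb->status = STS_GRANTED;
    }
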
2029 lkb->lkb_grmode = DLM_LOCK_IV; in _remove_lock()
2048 -1 removed lock */
2054 lkb->lkb_rqmode = DLM_LOCK_IV; in revert_lock()
2056 switch (lkb->lkb_status) { in revert_lock()
2065 lkb->lkb_grmode = DLM_LOCK_IV; in revert_lock()
2069 rv = -1; in revert_lock()
2072 log_print("invalid status for revert %d", lkb->lkb_status); in revert_lock()
2084 if (lkb->lkb_grmode != lkb->lkb_rqmode) { in _grant_lock()
2085 lkb->lkb_grmode = lkb->lkb_rqmode; in _grant_lock()
2086 if (lkb->lkb_status) in _grant_lock()
2092 lkb->lkb_rqmode = DLM_LOCK_IV; in _grant_lock()
2093 lkb->lkb_highbast = 0; in _grant_lock()
2103 struct dlm_message *ms) in grant_lock_pc() argument
2105 set_lvb_lock_pc(r, lkb, ms); in grant_lock_pc()
2132 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) { in munge_demoted()
2134 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode); in munge_demoted()
2138 lkb->lkb_grmode = DLM_LOCK_NL; in munge_demoted()
2141 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms) in munge_altmode() argument
2143 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) && in munge_altmode()
2144 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) { in munge_altmode()
2146 lkb->lkb_id, le32_to_cpu(ms->m_type)); in munge_altmode()
2150 if (lkb->lkb_exflags & DLM_LKF_ALTPR) in munge_altmode()
2151 lkb->lkb_rqmode = DLM_LOCK_PR; in munge_altmode()
2152 else if (lkb->lkb_exflags & DLM_LKF_ALTCW) in munge_altmode()
2153 lkb->lkb_rqmode = DLM_LOCK_CW; in munge_altmode()
2155 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags); in munge_altmode()
2162 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb, in first_in_list()
2164 if (lkb->lkb_id == first->lkb_id) in first_in_list()
2195 * Convert Queue: NL->EX (first lock)
2196 * PR->EX (second lock)
2200 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2204 * Originally, this function detected conv-deadlk in a more limited scope:
2205 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2206 * - if lkb1 was the first entry in the queue (not just earlier), and was
2211 * That second condition meant we'd only say there was conv-deadlk if
2222 * be zero, i.e. there will never be conv-deadlk between two locks that are
2231 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) { in conversion_deadlock_detect()
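
A hedged reconstruction of that walk, over an array rather than the kernel list, using the standard granted-vs-requested compatibility table (__dlm_compat_matrix, whose all-0/1 rows did not land in this match set): a lock ahead of lkb2 signals deadlock as soon as lkb2's granted mode blocks its request, while a lock behind lkb2 counts only when the blocking is mutual.

    /* Mode order: UN, NL, CR, CW, PR, PW, EX, PD; index = mode + 1. */
    static const int compat[8][8] = {
        {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}, /* PD */
    };

    struct clkb { int grmode, rqmode; };

    #define modes_compat(gr, rq) compat[(gr)->grmode + 1][(rq)->rqmode + 1]

    static int conv_deadlock(struct clkb *queue, int n, struct clkb *lkb2)
    {
        int i, past_lkb2 = 0;

        for (i = 0; i < n; i++) {
            struct clkb *lkb1 = &queue[i];

            if (lkb1 == lkb2) {
                past_lkb2 = 1;
                continue;
            }
            if (!past_lkb2) {
                /* lkb1 is ahead: lkb2's granted mode blocking it
                 * is enough to call it deadlock */
                if (!modes_compat(lkb2, lkb1))
                    return 1;
            } else if (!modes_compat(lkb2, lkb1) &&
                       !modes_compat(lkb1, lkb2)) {
                /* lkb1 is behind: only mutual blocking counts */
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        /* The example from the comment above: convert queue holding
         * NL->EX (first) and PR->EX (second). */
        struct clkb q[2] = { { 0 /*NL*/, 5 /*EX*/ }, { 3 /*PR*/, 5 /*EX*/ } };

        return !(conv_deadlock(q, 2, &q[1]) == 1 &&  /* PR->EX deadlocks */
                 conv_deadlock(q, 2, &q[0]) == 0);   /* NL->EX does not */
    }
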
2268 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV); in _can_be_granted()
2271 * 6-10: Version 5.4 introduced an option to address the phenomenon of in _can_be_granted()
2274 * 6-11: If the optional EXPEDITE flag is used with the new NL mode in _can_be_granted()
2282 * conversion or used with a non-NL requested mode. We also know an in _can_be_granted()
2285 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can in _can_be_granted()
2289 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE) in _can_be_granted()
2297 if (queue_conflict(&r->res_grantqueue, lkb)) in _can_be_granted()
2301 * 6-3: By default, a conversion request is immediately granted if the in _can_be_granted()
2306 if (queue_conflict(&r->res_convertqueue, lkb)) in _can_be_granted()
2317 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX. in _can_be_granted()
2319 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after in _can_be_granted()
2327 * 6-5: But the default algorithm for deciding whether to grant or in _can_be_granted()
2332 * 6-7: This issue is dealt with by using the optional QUECVT flag with in _can_be_granted()
2349 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT)) in _can_be_granted()
2357 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) { in _can_be_granted()
2358 if (list_empty(&r->res_convertqueue)) in _can_be_granted()
2369 if (lkb->lkb_exflags & DLM_LKF_NOORDER) in _can_be_granted()
2373 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be in _can_be_granted()
2378 if (!now && conv && first_in_list(lkb, &r->res_convertqueue)) in _can_be_granted()
2382 * 6-4: By default, a new request is immediately granted only if all in _can_be_granted()
2385 * - The queue of ungranted conversion requests for the resource is in _can_be_granted()
2387 * - The queue of ungranted new requests for the resource is empty. in _can_be_granted()
2388 * - The mode of the new request is compatible with the most in _can_be_granted()
2392 if (now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2393 list_empty(&r->res_waitqueue)) in _can_be_granted()
2397 * 6-4: Once a lock request is in the queue of ungranted new requests, in _can_be_granted()
2404 if (!now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2405 first_in_list(lkb, &r->res_waitqueue)) in _can_be_granted()
2415 int8_t alt = 0, rqmode = lkb->lkb_rqmode; in can_be_granted()
2416 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV); in can_be_granted()
2426 * The CONVDEADLK flag is non-standard and tells the dlm to resolve in can_be_granted()
2433 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) { in can_be_granted()
2434 lkb->lkb_grmode = DLM_LOCK_NL; in can_be_granted()
2435 lkb->lkb_sbflags |= DLM_SBF_DEMOTED; in can_be_granted()
2437 *err = -EDEADLK; in can_be_granted()
2440 lkb->lkb_id, now); in can_be_granted()
2447 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try in can_be_granted()
2453 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR)) in can_be_granted()
2455 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW)) in can_be_granted()
2459 lkb->lkb_rqmode = alt; in can_be_granted()
2462 lkb->lkb_sbflags |= DLM_SBF_ALTMODE; in can_be_granted()
2464 lkb->lkb_rqmode = rqmode; in can_be_granted()
2487 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) { in grant_pending_convert()
2501 lkb->lkb_id, lkb->lkb_nodeid, r->res_name); in grant_pending_convert()
2512 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) { in grant_pending_convert()
2513 if (lkb->lkb_highbast < lkb->lkb_rqmode) { in grant_pending_convert()
2514 queue_bast(r, lkb, lkb->lkb_rqmode); in grant_pending_convert()
2515 lkb->lkb_highbast = lkb->lkb_rqmode; in grant_pending_convert()
2519 lkb->lkb_id, lkb->lkb_nodeid, in grant_pending_convert()
2520 r->res_name); in grant_pending_convert()
2526 hi = max_t(int, lkb->lkb_rqmode, hi); in grant_pending_convert()
2528 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_convert()
2547 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { in grant_pending_wait()
2553 high = max_t(int, lkb->lkb_rqmode, high); in grant_pending_wait()
2554 if (lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_wait()
2569 if (gr->lkb_grmode == DLM_LOCK_PR && cw) { in lock_requires_bast()
2570 if (gr->lkb_highbast < DLM_LOCK_EX) in lock_requires_bast()
2575 if (gr->lkb_highbast < high && in lock_requires_bast()
2576 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1]) in lock_requires_bast()
2588 log_print("grant_pending_locks r nodeid %d", r->res_nodeid); in grant_pending_locks()
2605 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { in grant_pending_locks()
2606 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { in grant_pending_locks()
2608 lkb->lkb_grmode == DLM_LOCK_PR) in grant_pending_locks()
2612 lkb->lkb_highbast = high; in grant_pending_locks()
2619 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) || in modes_require_bast()
2620 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) { in modes_require_bast()
2621 if (gr->lkb_highbast < DLM_LOCK_EX) in modes_require_bast()
2626 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq)) in modes_require_bast()
2640 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { in send_bast_queue()
2641 queue_bast(r, gr, lkb->lkb_rqmode); in send_bast_queue()
2642 gr->lkb_highbast = lkb->lkb_rqmode; in send_bast_queue()
2649 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts()
2654 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts_all()
2655 send_bast_queue(r, &r->res_convertqueue, lkb); in send_blocking_asts_all()
2658 /* set_master(r, lkb) -- set the master nodeid of a resource
2683 r->res_first_lkid = lkb->lkb_id; in set_master()
2684 lkb->lkb_nodeid = r->res_nodeid; in set_master()
2688 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) { in set_master()
2689 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup); in set_master()
2693 if (r->res_master_nodeid == our_nodeid) { in set_master()
2694 lkb->lkb_nodeid = 0; in set_master()
2698 if (r->res_master_nodeid) { in set_master()
2699 lkb->lkb_nodeid = r->res_master_nodeid; in set_master()
2710 log_debug(r->res_ls, "set_master %x self master %d dir %d %s", in set_master()
2711 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid, in set_master()
2712 r->res_name); in set_master()
2713 r->res_master_nodeid = our_nodeid; in set_master()
2714 r->res_nodeid = 0; in set_master()
2715 lkb->lkb_nodeid = 0; in set_master()
2721 r->res_first_lkid = lkb->lkb_id; in set_master()
2730 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { in process_lookup_list()
2731 list_del_init(&lkb->lkb_rsb_lookup); in process_lookup_list()
2737 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2743 if (!r->res_first_lkid) in confirm_master()
2748 case -EINPROGRESS: in confirm_master()
2749 r->res_first_lkid = 0; in confirm_master()
2753 case -EAGAIN: in confirm_master()
2754 case -EBADR: in confirm_master()
2755 case -ENOTBLK: in confirm_master()
2760 r->res_first_lkid = 0; in confirm_master()
2762 if (!list_empty(&r->res_lookup)) { in confirm_master()
2763 lkb = list_entry(r->res_lookup.next, struct dlm_lkb, in confirm_master()
2765 list_del_init(&lkb->lkb_rsb_lookup); in confirm_master()
2766 r->res_first_lkid = lkb->lkb_id; in confirm_master()
2772 log_error(r->res_ls, "confirm_master unknown error %d", error); in confirm_master()
2791 int rv = -EINVAL; in set_lock_args()
2828 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr) in set_lock_args()
2831 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid) in set_lock_args()
2838 args->flags = flags; in set_lock_args()
2839 args->astfn = ast; in set_lock_args()
2840 args->astparam = astparam; in set_lock_args()
2841 args->bastfn = bast; in set_lock_args()
2843 args->timeout = timeout_cs; in set_lock_args()
2845 args->mode = mode; in set_lock_args()
2846 args->lksb = lksb; in set_lock_args()
2856 return -EINVAL; in set_unlock_args()
2859 return -EINVAL; in set_unlock_args()
2861 args->flags = flags; in set_unlock_args()
2862 args->astparam = astarg; in set_unlock_args()
2869 int rv = -EBUSY; in validate_lock_args()
2871 if (args->flags & DLM_LKF_CONVERT) { in validate_lock_args()
2872 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in validate_lock_args()
2876 if (lkb->lkb_wait_type || lkb->lkb_wait_count) in validate_lock_args()
2882 rv = -EINVAL; in validate_lock_args()
2883 if (lkb->lkb_flags & DLM_IFL_MSTCPY) in validate_lock_args()
2886 if (args->flags & DLM_LKF_QUECVT && in validate_lock_args()
2887 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) in validate_lock_args()
2891 lkb->lkb_exflags = args->flags; in validate_lock_args()
2892 lkb->lkb_sbflags = 0; in validate_lock_args()
2893 lkb->lkb_astfn = args->astfn; in validate_lock_args()
2894 lkb->lkb_astparam = args->astparam; in validate_lock_args()
2895 lkb->lkb_bastfn = args->bastfn; in validate_lock_args()
2896 lkb->lkb_rqmode = args->mode; in validate_lock_args()
2897 lkb->lkb_lksb = args->lksb; in validate_lock_args()
2898 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; in validate_lock_args()
2899 lkb->lkb_ownpid = (int) current->pid; in validate_lock_args()
2901 lkb->lkb_timeout_cs = args->timeout; in validate_lock_args()
2908 case -EINVAL: in validate_lock_args()
2912 rv, lkb->lkb_id, lkb->lkb_flags, args->flags, in validate_lock_args()
2913 lkb->lkb_status, lkb->lkb_wait_type, in validate_lock_args()
2914 lkb->lkb_resource->res_name); in validate_lock_args()
2918 rv, lkb->lkb_id, lkb->lkb_flags, args->flags, in validate_lock_args()
2919 lkb->lkb_status, lkb->lkb_wait_type, in validate_lock_args()
2920 lkb->lkb_resource->res_name); in validate_lock_args()
2927 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2930 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2936 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in validate_unlock_args()
2937 int rv = -EBUSY; in validate_unlock_args()
2940 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) && in validate_unlock_args()
2941 (lkb->lkb_wait_type || lkb->lkb_wait_count)) in validate_unlock_args()
2947 if (!list_empty(&lkb->lkb_rsb_lookup)) { in validate_unlock_args()
2948 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) { in validate_unlock_args()
2949 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id); in validate_unlock_args()
2950 list_del_init(&lkb->lkb_rsb_lookup); in validate_unlock_args()
2951 queue_cast(lkb->lkb_resource, lkb, in validate_unlock_args()
2952 args->flags & DLM_LKF_CANCEL ? in validate_unlock_args()
2953 -DLM_ECANCEL : -DLM_EUNLOCK); in validate_unlock_args()
2956 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */ in validate_unlock_args()
2960 rv = -EINVAL; in validate_unlock_args()
2961 if (lkb->lkb_flags & DLM_IFL_MSTCPY) { in validate_unlock_args()
2962 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); in validate_unlock_args()
2972 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) { in validate_unlock_args()
2973 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); in validate_unlock_args()
2974 rv = -ENOENT; in validate_unlock_args()
2980 if (args->flags & DLM_LKF_CANCEL) { in validate_unlock_args()
2981 if (lkb->lkb_exflags & DLM_LKF_CANCEL) in validate_unlock_args()
2990 if (lkb->lkb_flags & DLM_IFL_RESEND) { in validate_unlock_args()
2991 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL; in validate_unlock_args()
2992 rv = -EBUSY; in validate_unlock_args()
2997 if (lkb->lkb_status == DLM_LKSTS_GRANTED && in validate_unlock_args()
2998 !lkb->lkb_wait_type) { in validate_unlock_args()
2999 rv = -EBUSY; in validate_unlock_args()
3003 switch (lkb->lkb_wait_type) { in validate_unlock_args()
3006 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL; in validate_unlock_args()
3007 rv = -EBUSY; in validate_unlock_args()
3017 /* do we need to allow a force-unlock if there's a normal unlock in validate_unlock_args()
3019 fail such that we'd want to send a force-unlock to be sure? */ in validate_unlock_args()
3021 if (args->flags & DLM_LKF_FORCEUNLOCK) { in validate_unlock_args()
3022 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK) in validate_unlock_args()
3031 if (lkb->lkb_flags & DLM_IFL_RESEND) { in validate_unlock_args()
3032 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; in validate_unlock_args()
3033 rv = -EBUSY; in validate_unlock_args()
3037 switch (lkb->lkb_wait_type) { in validate_unlock_args()
3040 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; in validate_unlock_args()
3041 rv = -EBUSY; in validate_unlock_args()
3051 lkb->lkb_exflags |= args->flags; in validate_unlock_args()
3052 lkb->lkb_sbflags = 0; in validate_unlock_args()
3053 lkb->lkb_astparam = args->astparam; in validate_unlock_args()
3059 case -EINVAL: in validate_unlock_args()
3063 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags, in validate_unlock_args()
3064 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
3065 lkb->lkb_resource->res_name); in validate_unlock_args()
3069 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags, in validate_unlock_args()
3070 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
3071 lkb->lkb_resource->res_name); in validate_unlock_args()
3096 error = -EINPROGRESS; in do_request()
3102 error = -EAGAIN; in do_request()
3103 queue_cast(r, lkb, -EAGAIN); in do_request()
3112 case -EAGAIN: in do_request_effects()
3116 case -EINPROGRESS: in do_request_effects()
3139 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { in do_convert()
3142 queue_cast(r, lkb, -EDEADLK); in do_convert()
3143 error = -EDEADLK; in do_convert()
3148 to NL, and left us on the granted queue. This auto-demotion in do_convert()
3164 error = -EINPROGRESS; in do_convert()
3171 error = -EAGAIN; in do_convert()
3172 queue_cast(r, lkb, -EAGAIN); in do_convert()
3185 case -EAGAIN: in do_convert_effects()
3189 case -EINPROGRESS: in do_convert_effects()
3198 queue_cast(r, lkb, -DLM_EUNLOCK); in do_unlock()
3199 return -DLM_EUNLOCK; in do_unlock()
3208 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3216 queue_cast(r, lkb, -DLM_ECANCEL); in do_cancel()
3217 return -DLM_ECANCEL; in do_cancel()
3343 lkb->lkb_lksb->sb_lkid = lkb->lkb_id; in request_lock()
3358 r = lkb->lkb_resource; in convert_lock()
3380 r = lkb->lkb_resource; in unlock_lock()
3402 r = lkb->lkb_resource; in cancel_lock()
3440 return -EINVAL; in dlm_lock()
3445 error = find_lkb(ls, lksb->sb_lkid, &lkb); in dlm_lock()
3469 if (error == -EINPROGRESS) in dlm_lock()
3476 if (error == -EAGAIN || error == -EDEADLK) in dlm_lock()
3497 return -EINVAL; in dlm_unlock()
3516 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL) in dlm_unlock()
3518 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK))) in dlm_unlock()
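
The dlm_lock()/dlm_unlock() fragments above show the API boundary flattening internal status codes: anything that will also be reported through the completion ast comes back to the caller as 0. A sketch of that mapping (constant values are illustrative):

    #include <errno.h>

    #define EUNLOCK_STATUS  0x10001 /* stand-in for DLM_EUNLOCK */
    #define ECANCEL_STATUS  0x10002 /* stand-in for DLM_ECANCEL */
    #define LKF_CANCEL      0x1     /* illustrative flag bits */
    #define LKF_FORCEUNLOCK 0x2

    static int dlm_lock_retval(int error)
    {
        if (error == -EINPROGRESS)   /* queued; ast reports the result */
            return 0;
        if (error == -EAGAIN || error == -EDEADLK)
            return 0;                /* also delivered as ast status */
        return error;
    }

    static int dlm_unlock_retval(int error, unsigned int flags)
    {
        if (error == -EUNLOCK_STATUS || error == -ECANCEL_STATUS)
            return 0;                /* the normal completions */
        if (error == -EBUSY && (flags & (LKF_CANCEL | LKF_FORCEUNLOCK)))
            return 0;                /* overlap recorded; see the comment
                                        at validate_unlock_args() above */
        return error;
    }
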
3557 struct dlm_message *ms; in _create_message() local
3567 return -ENOBUFS; in _create_message()
3569 ms = (struct dlm_message *) mb; in _create_message()
3571 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in _create_message()
3572 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); in _create_message()
3573 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); in _create_message()
3574 ms->m_header.h_length = cpu_to_le16(mb_len); in _create_message()
3575 ms->m_header.h_cmd = DLM_MSG; in _create_message()
3577 ms->m_type = cpu_to_le32(mstype); in _create_message()
3580 *ms_ret = ms; in _create_message()
3595 mb_len += r->res_length; in create_message()
3602 if (lkb && lkb->lkb_lvbptr) in create_message()
3603 mb_len += r->res_ls->ls_lvblen; in create_message()
3607 return _create_message(r->res_ls, mb_len, to_nodeid, mstype, in create_message()
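
create_message() sizes each buffer as the fixed struct dlm_message plus a variable m_extra tail, the resource name for requests, lookups and removes, or the LVB when one is attached, and receive_extralen() later recovers the tail length from the header. A flexible-array sketch of the sizing (the struct layout is schematic, not the real wire format):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Schematic message: fixed header + variable m_extra[] tail. */
    struct msg {
        uint16_t h_length;  /* total bytes, header included */
        uint32_t m_type;
        char m_extra[];     /* resource name or LVB */
    };

    static struct msg *create_msg(uint32_t type, const void *extra,
                                  size_t elen)
    {
        size_t len = sizeof(struct msg) + elen;
        struct msg *ms = calloc(1, len);

        if (!ms)
            return NULL;    /* the kernel path returns -ENOBUFS */
        ms->h_length = (uint16_t)len;  /* little-endian on the real wire */
        ms->m_type = type;
        memcpy(ms->m_extra, extra, elen);
        return ms;
    }

    /* receive_extralen() inverts the sizing. */
    static size_t extralen(const struct msg *ms)
    {
        return ms->h_length - sizeof(struct msg);
    }
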
3614 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms) in send_message() argument
3621 struct dlm_message *ms) in send_args() argument
3623 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid); in send_args()
3624 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid); in send_args()
3625 ms->m_lkid = cpu_to_le32(lkb->lkb_id); in send_args()
3626 ms->m_remid = cpu_to_le32(lkb->lkb_remid); in send_args()
3627 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags); in send_args()
3628 ms->m_sbflags = cpu_to_le32(lkb->lkb_sbflags); in send_args()
3629 ms->m_flags = cpu_to_le32(lkb->lkb_flags); in send_args()
3630 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in send_args()
3631 ms->m_status = cpu_to_le32(lkb->lkb_status); in send_args()
3632 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode); in send_args()
3633 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode); in send_args()
3634 ms->m_hash = cpu_to_le32(r->res_hash); in send_args()
3639 if (lkb->lkb_bastfn) in send_args()
3640 ms->m_asts |= cpu_to_le32(DLM_CB_BAST); in send_args()
3641 if (lkb->lkb_astfn) in send_args()
3642 ms->m_asts |= cpu_to_le32(DLM_CB_CAST); in send_args()
3647 switch (ms->m_type) { in send_args()
3650 memcpy(ms->m_extra, r->res_name, r->res_length); in send_args()
3657 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK)) in send_args()
3659 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in send_args()
3666 struct dlm_message *ms; in send_common() local
3670 to_nodeid = r->res_nodeid; in send_common()
3676 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); in send_common()
3680 send_args(r, lkb, ms); in send_common()
3682 error = send_message(mh, ms); in send_common()
3706 r->res_ls->ls_stub_ms.m_flags = cpu_to_le32(DLM_IFL_STUB_MS); in send_convert()
3707 r->res_ls->ls_stub_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in send_convert()
3708 r->res_ls->ls_stub_ms.m_result = 0; in send_convert()
3709 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms); in send_convert()
3731 struct dlm_message *ms; in send_grant() local
3735 to_nodeid = lkb->lkb_nodeid; in send_grant()
3737 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh); in send_grant()
3741 send_args(r, lkb, ms); in send_grant()
3743 ms->m_result = 0; in send_grant()
3745 error = send_message(mh, ms); in send_grant()
3752 struct dlm_message *ms; in send_bast() local
3756 to_nodeid = lkb->lkb_nodeid; in send_bast()
3758 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh); in send_bast()
3762 send_args(r, lkb, ms); in send_bast()
3764 ms->m_bastmode = cpu_to_le32(mode); in send_bast()
3766 error = send_message(mh, ms); in send_bast()
3773 struct dlm_message *ms; in send_lookup() local
3783 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh); in send_lookup()
3787 send_args(r, lkb, ms); in send_lookup()
3789 error = send_message(mh, ms); in send_lookup()
3801 struct dlm_message *ms; in send_remove() local
3807 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh); in send_remove()
3811 memcpy(ms->m_extra, r->res_name, r->res_length); in send_remove()
3812 ms->m_hash = cpu_to_le32(r->res_hash); in send_remove()
3814 error = send_message(mh, ms); in send_remove()
3822 struct dlm_message *ms; in send_common_reply() local
3826 to_nodeid = lkb->lkb_nodeid; in send_common_reply()
3828 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); in send_common_reply()
3832 send_args(r, lkb, ms); in send_common_reply()
3834 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_common_reply()
3836 error = send_message(mh, ms); in send_common_reply()
3864 struct dlm_rsb *r = &ls->ls_stub_rsb; in send_lookup_reply()
3865 struct dlm_message *ms; in send_lookup_reply() local
3867 int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); in send_lookup_reply()
3869 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh); in send_lookup_reply()
3873 ms->m_lkid = ms_in->m_lkid; in send_lookup_reply()
3874 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_lookup_reply()
3875 ms->m_nodeid = cpu_to_le32(ret_nodeid); in send_lookup_reply()
3877 error = send_message(mh, ms); in send_lookup_reply()
3886 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms) in receive_flags() argument
3888 lkb->lkb_exflags = le32_to_cpu(ms->m_exflags); in receive_flags()
3889 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags); in receive_flags()
3890 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | in receive_flags()
3891 (le32_to_cpu(ms->m_flags) & 0x0000FFFF); in receive_flags()
3894 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms) in receive_flags_reply() argument
3896 if (ms->m_flags == cpu_to_le32(DLM_IFL_STUB_MS)) in receive_flags_reply()
3899 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags); in receive_flags_reply()
3900 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | in receive_flags_reply()
3901 (le32_to_cpu(ms->m_flags) & 0x0000FFFF); in receive_flags_reply()
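
receive_flags() and receive_flags_reply() both preserve the top half of lkb_flags: only the low 16 bits are wire flags shared between nodes, the high 16 are node-local state. The merge in one line:

    #include <stdint.h>

    /* Take the low 16 (wire) bits from the message, keep the high 16
     * (node-local) bits untouched. */
    static uint32_t merge_flags(uint32_t local, uint32_t wire)
    {
        return (local & 0xFFFF0000u) | (wire & 0x0000FFFFu);
    }
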
3904 static int receive_extralen(struct dlm_message *ms) in receive_extralen() argument
3906 return (le16_to_cpu(ms->m_header.h_length) - in receive_extralen()
3911 struct dlm_message *ms) in receive_lvb() argument
3915 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_lvb()
3916 if (!lkb->lkb_lvbptr) in receive_lvb()
3917 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_lvb()
3918 if (!lkb->lkb_lvbptr) in receive_lvb()
3919 return -ENOMEM; in receive_lvb()
3920 len = receive_extralen(ms); in receive_lvb()
3921 if (len > ls->ls_lvblen) in receive_lvb()
3922 len = ls->ls_lvblen; in receive_lvb()
3923 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in receive_lvb()
3939 struct dlm_message *ms) in receive_request_args() argument
3941 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_args()
3942 lkb->lkb_ownpid = le32_to_cpu(ms->m_pid); in receive_request_args()
3943 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_args()
3944 lkb->lkb_grmode = DLM_LOCK_IV; in receive_request_args()
3945 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_request_args()
3947 lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL; in receive_request_args()
3948 lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL; in receive_request_args()
3950 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_request_args()
3952 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_request_args()
3953 if (!lkb->lkb_lvbptr) in receive_request_args()
3954 return -ENOMEM; in receive_request_args()
3961 struct dlm_message *ms) in receive_convert_args() argument
3963 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in receive_convert_args()
3964 return -EBUSY; in receive_convert_args()
3966 if (receive_lvb(ls, lkb, ms)) in receive_convert_args()
3967 return -ENOMEM; in receive_convert_args()
3969 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_convert_args()
3970 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in receive_convert_args()
3976 struct dlm_message *ms) in receive_unlock_args() argument
3978 if (receive_lvb(ls, lkb, ms)) in receive_unlock_args()
3979 return -ENOMEM; in receive_unlock_args()
3983 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3984    uses to send a reply and that the remote end uses to process the reply. */
3986 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms) in setup_stub_lkb() argument
3988 struct dlm_lkb *lkb = &ls->ls_stub_lkb; in setup_stub_lkb()
3989 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in setup_stub_lkb()
3990 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in setup_stub_lkb()
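/* setup_stub_lkb() loads a per-lockspace scratch lkb with just the routing
 * data a failure reply needs (sender node, sender's lock id), so the normal
 * send_xxxx_reply() helpers can be reused even when no real lkb exists.
 * A toy model of that idea with illustrative names: */

#include <stdint.h>
#include <stdio.h>

struct stub_lkb { int nodeid; uint32_t remid; };

static void setup_stub(struct stub_lkb *s, int from_nodeid, uint32_t lkid)
{
	s->nodeid = from_nodeid; /* where the failure reply is sent */
	s->remid  = lkid;        /* the id the sender will recognize */
}

int main(void)
{
	struct stub_lkb stub;

	setup_stub(&stub, 7, 0x3c0001);
	printf("reply to node %d for lkid 0x%x\n", stub.nodeid, stub.remid);
	return 0;
}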
3996 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) in validate_message() argument
3998 int from = le32_to_cpu(ms->m_header.h_nodeid); in validate_message()
4002 if (ms->m_flags & cpu_to_le32(DLM_IFL_USER) && in validate_message()
4003 ~lkb->lkb_flags & DLM_IFL_USER) { in validate_message()
4004 log_error(lkb->lkb_resource->res_ls, in validate_message()
4006 error = -EINVAL; in validate_message()
4010 switch (ms->m_type) { in validate_message()
4014 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
4015 error = -EINVAL; in validate_message()
4023 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
4024 error = -EINVAL; in validate_message()
4029 error = -EINVAL; in validate_message()
4030 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) in validate_message()
4031 error = -EINVAL; in validate_message()
4035 error = -EINVAL; in validate_message()
4040 log_error(lkb->lkb_resource->res_ls, in validate_message()
4042 le32_to_cpu(ms->m_type), from, lkb->lkb_id, in validate_message()
4043 lkb->lkb_remid, lkb->lkb_flags, lkb->lkb_nodeid); in validate_message()
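/* validate_message() pairs each message type with the lkb's role --
 * request-side messages must land on a master copy, replies on a process
 * copy -- and the sender must be the node the lkb already points at.
 * A simplified, self-contained model; the types and fields here are
 * stand-ins, not the kernel's: */

#include <errno.h>
#include <stdio.h>

enum role { PROCESS_COPY, MASTER_COPY };
enum msg  { MSG_CONVERT, MSG_CONVERT_REPLY };

static int validate(enum role role, int lkb_nodeid, enum msg type, int from)
{
	switch (type) {
	case MSG_CONVERT:       /* master-bound: lkb must be the master copy */
		return (role == MASTER_COPY && lkb_nodeid == from) ? 0 : -EINVAL;
	case MSG_CONVERT_REPLY: /* reply: must come from the known master */
		return (role == PROCESS_COPY && lkb_nodeid == from) ? 0 : -EINVAL;
	}
	return -EINVAL;
}

int main(void)
{
	printf("%d\n", validate(MASTER_COPY, 3, MSG_CONVERT, 3)); /* 0 */
	printf("%d\n", validate(MASTER_COPY, 3, MSG_CONVERT, 5)); /* -EINVAL */
	return 0;
}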
4050 struct dlm_message *ms; in send_repeat_remove() local
4060 b = hash & (ls->ls_rsbtbl_size - 1); in send_repeat_remove()
4066 spin_lock(&ls->ls_rsbtbl[b].lock); in send_repeat_remove()
4067 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in send_repeat_remove()
4069 spin_unlock(&ls->ls_rsbtbl[b].lock); in send_repeat_remove()
4074 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in send_repeat_remove()
4076 spin_unlock(&ls->ls_rsbtbl[b].lock); in send_repeat_remove()
4081 /* use ls->remove_name2 to avoid conflict with shrink? */ in send_repeat_remove()
4083 spin_lock(&ls->ls_remove_spin); in send_repeat_remove()
4084 ls->ls_remove_len = len; in send_repeat_remove()
4085 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); in send_repeat_remove()
4086 spin_unlock(&ls->ls_remove_spin); in send_repeat_remove()
4087 spin_unlock(&ls->ls_rsbtbl[b].lock); in send_repeat_remove()
4090 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh); in send_repeat_remove()
4094 memcpy(ms->m_extra, name, len); in send_repeat_remove()
4095 ms->m_hash = cpu_to_le32(hash); in send_repeat_remove()
4097 send_message(mh, ms); in send_repeat_remove()
4100 spin_lock(&ls->ls_remove_spin); in send_repeat_remove()
4101 ls->ls_remove_len = 0; in send_repeat_remove()
4102 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); in send_repeat_remove()
4103 spin_unlock(&ls->ls_remove_spin); in send_repeat_remove()
4104 wake_up(&ls->ls_remove_wait); in send_repeat_remove()
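/* Both send_repeat_remove() above and receive_remove() below pick a hash
 * bucket with "hash & (ls_rsbtbl_size - 1)", which is only a valid modulo
 * when the table size is a power of two: the mask then keeps exactly
 * log2(size) low bits. A minimal demonstration: */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t table_size = 1024;          /* must be a power of two */
	uint32_t hash = 0xDEADBEEF;
	uint32_t b = hash & (table_size - 1);

	printf("bucket %u of %u\n", b, table_size); /* bucket 751 of 1024 */
	return 0;
}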
4107 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) in receive_request() argument
4114 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request()
4120 receive_flags(lkb, ms); in receive_request()
4121 lkb->lkb_flags |= DLM_IFL_MSTCPY; in receive_request()
4122 error = receive_request_args(ls, lkb, ms); in receive_request()
4134 namelen = receive_extralen(ms); in receive_request()
4136 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid, in receive_request()
4145 if (r->res_master_nodeid != dlm_our_nodeid()) { in receive_request()
4163 if (error == -EINPROGRESS) in receive_request()
4176 /* We could repeatedly return -EBADR here if our send_remove() is in receive_request()
4185 if (error != -ENOTBLK) { in receive_request()
4187 le32_to_cpu(ms->m_lkid), from_nodeid, error); in receive_request()
4190 if (namelen && error == -EBADR) { in receive_request()
4191 send_repeat_remove(ls, ms->m_extra, namelen); in receive_request()
4195 setup_stub_lkb(ls, ms); in receive_request()
4196 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); in receive_request()
4200 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) in receive_convert() argument
4206 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert()
4210 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_convert()
4212 "remote %d %x", lkb->lkb_id, lkb->lkb_remid, in receive_convert()
4213 (unsigned long long)lkb->lkb_recover_seq, in receive_convert()
4214 le32_to_cpu(ms->m_header.h_nodeid), in receive_convert()
4215 le32_to_cpu(ms->m_lkid)); in receive_convert()
4216 error = -ENOENT; in receive_convert()
4221 r = lkb->lkb_resource; in receive_convert()
4226 error = validate_message(lkb, ms); in receive_convert()
4230 receive_flags(lkb, ms); in receive_convert()
4232 error = receive_convert_args(ls, lkb, ms); in receive_convert()
4251 setup_stub_lkb(ls, ms); in receive_convert()
4252 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); in receive_convert()
4256 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) in receive_unlock() argument
4262 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock()
4266 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_unlock()
4268 lkb->lkb_id, lkb->lkb_remid, in receive_unlock()
4269 le32_to_cpu(ms->m_header.h_nodeid), in receive_unlock()
4270 le32_to_cpu(ms->m_lkid)); in receive_unlock()
4271 error = -ENOENT; in receive_unlock()
4276 r = lkb->lkb_resource; in receive_unlock()
4281 error = validate_message(lkb, ms); in receive_unlock()
4285 receive_flags(lkb, ms); in receive_unlock()
4287 error = receive_unlock_args(ls, lkb, ms); in receive_unlock()
4303 setup_stub_lkb(ls, ms); in receive_unlock()
4304 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); in receive_unlock()
4308 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) in receive_cancel() argument
4314 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel()
4318 receive_flags(lkb, ms); in receive_cancel()
4320 r = lkb->lkb_resource; in receive_cancel()
4325 error = validate_message(lkb, ms); in receive_cancel()
4339 setup_stub_lkb(ls, ms); in receive_cancel()
4340 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); in receive_cancel()
4344 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) in receive_grant() argument
4350 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_grant()
4354 r = lkb->lkb_resource; in receive_grant()
4359 error = validate_message(lkb, ms); in receive_grant()
4363 receive_flags_reply(lkb, ms); in receive_grant()
4365 munge_altmode(lkb, ms); in receive_grant()
4366 grant_lock_pc(r, lkb, ms); in receive_grant()
4375 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) in receive_bast() argument
4381 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_bast()
4385 r = lkb->lkb_resource; in receive_bast()
4390 error = validate_message(lkb, ms); in receive_bast()
4394 queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode)); in receive_bast()
4395 lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode); in receive_bast()
4403 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) in receive_lookup() argument
4407 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_lookup()
4410 len = receive_extralen(ms); in receive_lookup()
4412 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0, in receive_lookup()
4417 receive_request(ls, ms); in receive_lookup()
4420 send_lookup_reply(ls, ms, ret_nodeid, error); in receive_lookup()
4423 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms) in receive_remove() argument
4430 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_remove()
4432 len = receive_extralen(ms); in receive_remove()
4440 dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash)); in receive_remove()
4457 memcpy(name, ms->m_extra, len); in receive_remove()
4460 b = hash & (ls->ls_rsbtbl_size - 1); in receive_remove()
4462 spin_lock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4464 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in receive_remove()
4467 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in receive_remove()
4472 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4475 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4478 from_nodeid, r->res_master_nodeid); in receive_remove()
4480 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4485 from_nodeid, r->res_master_nodeid, r->res_first_lkid, in receive_remove()
4487 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4491 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4493 from_nodeid, r->res_master_nodeid); in receive_remove()
4495 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4499 if (kref_put(&r->res_ref, kill_rsb)) { in receive_remove()
4500 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in receive_remove()
4501 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4507 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4511 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms) in receive_purge() argument
4513 do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid)); in receive_purge()
4516 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) in receive_request_reply() argument
4521 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_reply()
4523 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_request_reply()
4527 r = lkb->lkb_resource; in receive_request_reply()
4531 error = validate_message(lkb, ms); in receive_request_reply()
4535 mstype = lkb->lkb_wait_type; in receive_request_reply()
4539 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid), in receive_request_reply()
4540 from_dlm_errno(le32_to_cpu(ms->m_result))); in receive_request_reply()
4548 r->res_master_nodeid = from_nodeid; in receive_request_reply()
4549 r->res_nodeid = from_nodeid; in receive_request_reply()
4550 lkb->lkb_nodeid = from_nodeid; in receive_request_reply()
4554 result = from_dlm_errno(le32_to_cpu(ms->m_result)); in receive_request_reply()
4557 case -EAGAIN: in receive_request_reply()
4559 queue_cast(r, lkb, -EAGAIN); in receive_request_reply()
4560 confirm_master(r, -EAGAIN); in receive_request_reply()
4564 case -EINPROGRESS: in receive_request_reply()
4567 receive_flags_reply(lkb, ms); in receive_request_reply()
4568 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_reply()
4570 munge_altmode(lkb, ms); in receive_request_reply()
4575 grant_lock_pc(r, lkb, ms); in receive_request_reply()
4581 case -EBADR: in receive_request_reply()
4582 case -ENOTBLK: in receive_request_reply()
4585 "master %d dir %d first %x %s", lkb->lkb_id, in receive_request_reply()
4586 from_nodeid, result, r->res_master_nodeid, in receive_request_reply()
4587 r->res_dir_nodeid, r->res_first_lkid, r->res_name); in receive_request_reply()
4589 if (r->res_dir_nodeid != dlm_our_nodeid() && in receive_request_reply()
4590 r->res_master_nodeid != dlm_our_nodeid()) { in receive_request_reply()
4591 /* cause _request_lock->set_master->send_lookup */ in receive_request_reply()
4592 r->res_master_nodeid = 0; in receive_request_reply()
4593 r->res_nodeid = -1; in receive_request_reply()
4594 lkb->lkb_nodeid = -1; in receive_request_reply()
4605 if (r->res_master_nodeid == dlm_our_nodeid()) in receive_request_reply()
4612 lkb->lkb_id, result); in receive_request_reply()
4615 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) { in receive_request_reply()
4617 lkb->lkb_id, result); in receive_request_reply()
4618 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; in receive_request_reply()
4619 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in receive_request_reply()
4621 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) { in receive_request_reply()
4622 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); in receive_request_reply()
4623 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; in receive_request_reply()
4624 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in receive_request_reply()
4627 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in receive_request_reply()
4628 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; in receive_request_reply()
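/* The tail of receive_request_reply() resolves a race where the caller
 * unlocked or cancelled while the original request was still in flight:
 * the reply handler clears both overlap flags and takes the path the
 * flags demand instead of keeping the grant. A toy model under those
 * assumptions; the flag values and strings are illustrative: */

#include <stdio.h>

#define OVERLAP_UNLOCK 0x1
#define OVERLAP_CANCEL 0x2

static const char *resolve(unsigned *flags, int in_progress)
{
	const char *action = "keep result";

	if (*flags & OVERLAP_UNLOCK)
		action = "run unlock";      /* a racing unlock always wins */
	else if ((*flags & OVERLAP_CANCEL) && in_progress)
		action = "run cancel";      /* cancel applies to a queued request */

	*flags &= ~(OVERLAP_UNLOCK | OVERLAP_CANCEL);
	return action;
}

int main(void)
{
	unsigned flags = OVERLAP_CANCEL;

	printf("%s\n", resolve(&flags, 1)); /* run cancel */
	printf("%s\n", resolve(&flags, 1)); /* keep result */
	return 0;
}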
4638 struct dlm_message *ms) in __receive_convert_reply() argument
4641 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in __receive_convert_reply()
4642 case -EAGAIN: in __receive_convert_reply()
4644 queue_cast(r, lkb, -EAGAIN); in __receive_convert_reply()
4647 case -EDEADLK: in __receive_convert_reply()
4648 receive_flags_reply(lkb, ms); in __receive_convert_reply()
4650 queue_cast(r, lkb, -EDEADLK); in __receive_convert_reply()
4653 case -EINPROGRESS: in __receive_convert_reply()
4655 receive_flags_reply(lkb, ms); in __receive_convert_reply()
4665 receive_flags_reply(lkb, ms); in __receive_convert_reply()
4668 grant_lock_pc(r, lkb, ms); in __receive_convert_reply()
4673 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", in __receive_convert_reply()
4674 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in __receive_convert_reply()
4675 le32_to_cpu(ms->m_lkid), in __receive_convert_reply()
4676 from_dlm_errno(le32_to_cpu(ms->m_result))); in __receive_convert_reply()
4682 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms) in _receive_convert_reply() argument
4684 struct dlm_rsb *r = lkb->lkb_resource; in _receive_convert_reply()
4690 error = validate_message(lkb, ms); in _receive_convert_reply()
4695 error = remove_from_waiters_ms(lkb, ms); in _receive_convert_reply()
4699 __receive_convert_reply(r, lkb, ms); in _receive_convert_reply()
4705 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms) in receive_convert_reply() argument
4710 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert_reply()
4714 _receive_convert_reply(lkb, ms); in receive_convert_reply()
4719 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) in _receive_unlock_reply() argument
4721 struct dlm_rsb *r = lkb->lkb_resource; in _receive_unlock_reply()
4727 error = validate_message(lkb, ms); in _receive_unlock_reply()
4732 error = remove_from_waiters_ms(lkb, ms); in _receive_unlock_reply()
4738 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_unlock_reply()
4739 case -DLM_EUNLOCK: in _receive_unlock_reply()
4740 receive_flags_reply(lkb, ms); in _receive_unlock_reply()
4742 queue_cast(r, lkb, -DLM_EUNLOCK); in _receive_unlock_reply()
4744 case -ENOENT: in _receive_unlock_reply()
4747 log_error(r->res_ls, "receive_unlock_reply %x error %d", in _receive_unlock_reply()
4748 lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_unlock_reply()
4755 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms) in receive_unlock_reply() argument
4760 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock_reply()
4764 _receive_unlock_reply(lkb, ms); in receive_unlock_reply()
4769 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) in _receive_cancel_reply() argument
4771 struct dlm_rsb *r = lkb->lkb_resource; in _receive_cancel_reply()
4777 error = validate_message(lkb, ms); in _receive_cancel_reply()
4782 error = remove_from_waiters_ms(lkb, ms); in _receive_cancel_reply()
4788 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_cancel_reply()
4789 case -DLM_ECANCEL: in _receive_cancel_reply()
4790 receive_flags_reply(lkb, ms); in _receive_cancel_reply()
4792 queue_cast(r, lkb, -DLM_ECANCEL); in _receive_cancel_reply()
4797 log_error(r->res_ls, "receive_cancel_reply %x error %d", in _receive_cancel_reply()
4798 lkb->lkb_id, in _receive_cancel_reply()
4799 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_cancel_reply()
4806 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms) in receive_cancel_reply() argument
4811 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel_reply()
4815 _receive_cancel_reply(lkb, ms); in receive_cancel_reply()
4820 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) in receive_lookup_reply() argument
4827 error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb); in receive_lookup_reply()
4830 le32_to_cpu(ms->m_lkid)); in receive_lookup_reply()
4834 /* ms->m_result is the value returned by dlm_master_lookup on dir node in receive_lookup_reply()
4835 FIXME: will a non-zero error ever be returned? */ in receive_lookup_reply()
4837 r = lkb->lkb_resource; in receive_lookup_reply()
4845 ret_nodeid = le32_to_cpu(ms->m_nodeid); in receive_lookup_reply()
4853 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) { in receive_lookup_reply()
4857 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in receive_lookup_reply()
4858 ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in receive_lookup_reply()
4859 dlm_our_nodeid(), r->res_first_lkid, r->res_name); in receive_lookup_reply()
4863 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4864 r->res_nodeid = 0; in receive_lookup_reply()
4866 r->res_first_lkid = 0; in receive_lookup_reply()
4867 } else if (ret_nodeid == -1) { in receive_lookup_reply()
4870 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid)); in receive_lookup_reply()
4871 r->res_master_nodeid = 0; in receive_lookup_reply()
4872 r->res_nodeid = -1; in receive_lookup_reply()
4873 lkb->lkb_nodeid = -1; in receive_lookup_reply()
4876 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4877 r->res_nodeid = ret_nodeid; in receive_lookup_reply()
4882 lkb->lkb_id, lkb->lkb_flags); in receive_lookup_reply()
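/* receive_lookup_reply() translates the directory's answer (ret_nodeid)
 * into the local view of the master: our own nodeid means we become the
 * master (res_nodeid 0), -1 means the directory lost the entry and the
 * lookup must be redone, anything else records a remote master. A compact
 * model of that three-way decision, mirroring the branches shown above: */

#include <stdio.h>

static void set_master(int ret_nodeid, int our_nodeid,
		       int *master_nodeid, int *res_nodeid)
{
	if (ret_nodeid == our_nodeid) {
		*master_nodeid = our_nodeid;
		*res_nodeid = 0;          /* we are the master */
	} else if (ret_nodeid == -1) {
		*master_nodeid = 0;       /* unknown: retry the lookup */
		*res_nodeid = -1;
	} else {
		*master_nodeid = ret_nodeid;
		*res_nodeid = ret_nodeid; /* remote master */
	}
}

int main(void)
{
	int master, nodeid;

	set_master(2, 2, &master, &nodeid);
	printf("local:  master=%d res_nodeid=%d\n", master, nodeid);
	set_master(5, 2, &master, &nodeid);
	printf("remote: master=%d res_nodeid=%d\n", master, nodeid);
	return 0;
}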
4899 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, in _receive_message() argument
4904 if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) { in _receive_message()
4905 log_limit(ls, "receive %d from non-member %d %x %x %d", in _receive_message()
4906 le32_to_cpu(ms->m_type), in _receive_message()
4907 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4908 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
4909 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_message()
4913 switch (ms->m_type) { in _receive_message()
4918 error = receive_request(ls, ms); in _receive_message()
4922 error = receive_convert(ls, ms); in _receive_message()
4926 error = receive_unlock(ls, ms); in _receive_message()
4931 error = receive_cancel(ls, ms); in _receive_message()
4937 error = receive_request_reply(ls, ms); in _receive_message()
4941 error = receive_convert_reply(ls, ms); in _receive_message()
4945 error = receive_unlock_reply(ls, ms); in _receive_message()
4949 error = receive_cancel_reply(ls, ms); in _receive_message()
4956 error = receive_grant(ls, ms); in _receive_message()
4961 error = receive_bast(ls, ms); in _receive_message()
4967 receive_lookup(ls, ms); in _receive_message()
4971 receive_remove(ls, ms); in _receive_message()
4977 receive_lookup_reply(ls, ms); in _receive_message()
4983 receive_purge(ls, ms); in _receive_message()
4988 le32_to_cpu(ms->m_type)); in _receive_message()
5002 if (error == -ENOENT && noent) { in _receive_message()
5004 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
5005 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
5006 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
5007 } else if (error == -ENOENT) { in _receive_message()
5009 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
5010 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
5011 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
5013 if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT)) in _receive_message()
5014 dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash)); in _receive_message()
5017 if (error == -EINVAL) { in _receive_message()
5020 le32_to_cpu(ms->m_type), in _receive_message()
5021 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
5022 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
5035 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms, in dlm_receive_message() argument
5042 if (!ls->ls_generation) { in dlm_receive_message()
5044 le32_to_cpu(ms->m_type), nodeid); in dlm_receive_message()
5048 dlm_add_requestqueue(ls, nodeid, ms); in dlm_receive_message()
5051 _receive_message(ls, ms, 0); in dlm_receive_message()
5058 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, in dlm_receive_message_saved() argument
5061 _receive_message(ls, ms, saved_seq); in dlm_receive_message_saved()
5071 struct dlm_header *hd = &p->header; in dlm_receive_buffer()
5075 switch (hd->h_cmd) { in dlm_receive_buffer()
5077 type = le32_to_cpu(p->message.m_type); in dlm_receive_buffer()
5080 type = le32_to_cpu(p->rcom.rc_type); in dlm_receive_buffer()
5083 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); in dlm_receive_buffer()
5087 if (le32_to_cpu(hd->h_nodeid) != nodeid) { in dlm_receive_buffer()
5089 le32_to_cpu(hd->h_nodeid), nodeid, in dlm_receive_buffer()
5090 le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
5094 ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
5099 le32_to_cpu(hd->u.h_lockspace), nodeid, in dlm_receive_buffer()
5100 hd->h_cmd, type); in dlm_receive_buffer()
5103 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) in dlm_receive_buffer()
5104 dlm_send_ls_not_ready(nodeid, &p->rcom); in dlm_receive_buffer()
5108 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to in dlm_receive_buffer()
5109 be inactive (in this ls) before transitioning to recovery mode */ in dlm_receive_buffer()
5111 down_read(&ls->ls_recv_active); in dlm_receive_buffer()
5112 if (hd->h_cmd == DLM_MSG) in dlm_receive_buffer()
5113 dlm_receive_message(ls, &p->message, nodeid); in dlm_receive_buffer()
5114 else if (hd->h_cmd == DLM_RCOM) in dlm_receive_buffer()
5115 dlm_receive_rcom(ls, &p->rcom, nodeid); in dlm_receive_buffer()
5118 hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
5119 up_read(&ls->ls_recv_active); in dlm_receive_buffer()
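/* ls_recv_active is a reader/writer semaphore: every receive path holds
 * it for read while processing, and recovery takes it for write, which
 * cannot succeed until all in-flight receives have drained. A pthread
 * analogue of the same pattern (a sketch, not the kernel API; compile
 * with -lpthread): */

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t recv_active = PTHREAD_RWLOCK_INITIALIZER;

static void receive_one(int msg)
{
	pthread_rwlock_rdlock(&recv_active);  /* like down_read() */
	printf("processing message %d\n", msg);
	pthread_rwlock_unlock(&recv_active);
}

static void enter_recovery(void)
{
	pthread_rwlock_wrlock(&recv_active);  /* waits out all readers */
	printf("receives drained; safe to change recovery state\n");
	pthread_rwlock_unlock(&recv_active);
}

int main(void)
{
	receive_one(1);
	receive_one(2);
	enter_recovery();
	return 0;
}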
5130 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS); in recover_convert_waiter()
5131 ms_stub->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in recover_convert_waiter()
5132 ms_stub->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); in recover_convert_waiter()
5133 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in recover_convert_waiter()
5137 lkb->lkb_grmode = DLM_LOCK_IV; in recover_convert_waiter()
5138 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); in recover_convert_waiter()
5141 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { in recover_convert_waiter()
5142 lkb->lkb_flags |= DLM_IFL_RESEND; in recover_convert_waiter()
5145 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down in recover_convert_waiter()
5158 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) in waiter_needs_recovery()
5164 /* Recovery for locks that are waiting for replies from nodes that are now
5165    gone.  We can just complete unlocks and cancels by faking a reply from the
5166    dead node.  Requests and up-conversions we flag to be resent after
5167    recovery.  Down-conversions can just be completed with a fake reply like
5168    unlocks.  Conversions between PR and CW need special attention. */
5181 mutex_lock(&ls->ls_waiters_mutex); in dlm_recover_waiters_pre()
5183 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { in dlm_recover_waiters_pre()
5185 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); in dlm_recover_waiters_pre()
5190 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { in dlm_recover_waiters_pre()
5193 lkb->lkb_id, in dlm_recover_waiters_pre()
5194 lkb->lkb_remid, in dlm_recover_waiters_pre()
5195 lkb->lkb_wait_type, in dlm_recover_waiters_pre()
5196 lkb->lkb_resource->res_nodeid, in dlm_recover_waiters_pre()
5197 lkb->lkb_nodeid, in dlm_recover_waiters_pre()
5198 lkb->lkb_wait_nodeid, in dlm_recover_waiters_pre()
5205 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) { in dlm_recover_waiters_pre()
5206 lkb->lkb_flags |= DLM_IFL_RESEND; in dlm_recover_waiters_pre()
5213 wait_type = lkb->lkb_wait_type; in dlm_recover_waiters_pre()
5214 stub_unlock_result = -DLM_EUNLOCK; in dlm_recover_waiters_pre()
5215 stub_cancel_result = -DLM_ECANCEL; in dlm_recover_waiters_pre()
5225 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
5230 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
5231 stub_unlock_result = -ENOENT; in dlm_recover_waiters_pre()
5235 lkb->lkb_id, lkb->lkb_flags, wait_type, in dlm_recover_waiters_pre()
5242 lkb->lkb_flags |= DLM_IFL_RESEND; in dlm_recover_waiters_pre()
5252 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS); in dlm_recover_waiters_pre()
5253 ms_stub->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY); in dlm_recover_waiters_pre()
5254 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_unlock_result)); in dlm_recover_waiters_pre()
5255 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
5263 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS); in dlm_recover_waiters_pre()
5264 ms_stub->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY); in dlm_recover_waiters_pre()
5265 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_cancel_result)); in dlm_recover_waiters_pre()
5266 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
5273 lkb->lkb_wait_type, wait_type); in dlm_recover_waiters_pre()
5277 mutex_unlock(&ls->ls_waiters_mutex); in dlm_recover_waiters_pre()
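/* dlm_recover_waiters_pre() fabricates the reply a dead node can no
 * longer send -- a stub message flagged DLM_IFL_STUB_MS carrying a canned
 * result -- and feeds it to the ordinary reply handler so the waiting lkb
 * completes normally. A toy model of that trick; the struct fields,
 * constants and handler below are illustrative stand-ins: */

#include <stdint.h>
#include <stdio.h>

#define STUB_MS 0x1

struct stub_msg {
	uint32_t flags;    /* STUB_MS marks a locally fabricated reply */
	int      h_nodeid; /* pretend it came from the removed node */
	int      result;   /* canned result, e.g. a -DLM_EUNLOCK stand-in */
};

static void receive_unlock_reply_demo(const struct stub_msg *ms)
{
	printf("unlock completed via %s reply from node %d, result %d\n",
	       (ms->flags & STUB_MS) ? "stub" : "real",
	       ms->h_nodeid, ms->result);
}

int main(void)
{
	struct stub_msg ms = {
		.flags = STUB_MS,
		.h_nodeid = 4,  /* the node that went away */
		.result = -1,   /* placeholder for the real stub result */
	};

	receive_unlock_reply_demo(&ms);
	return 0;
}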
5285 mutex_lock(&ls->ls_waiters_mutex); in find_resend_waiter()
5286 list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { in find_resend_waiter()
5287 if (iter->lkb_flags & DLM_IFL_RESEND) { in find_resend_waiter()
5293 mutex_unlock(&ls->ls_waiters_mutex); in find_resend_waiter()
5299 master or dir-node for r. Processing the lkb may result in it being placed
5323 error = -EINTR; in dlm_recover_waiters_post()
5331 r = lkb->lkb_resource; in dlm_recover_waiters_post()
5335 mstype = lkb->lkb_wait_type; in dlm_recover_waiters_post()
5342 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype, in dlm_recover_waiters_post()
5343 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, in dlm_recover_waiters_post()
5350 lkb->lkb_flags &= ~DLM_IFL_RESEND; in dlm_recover_waiters_post()
5351 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; in dlm_recover_waiters_post()
5352 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; in dlm_recover_waiters_post()
5353 lkb->lkb_wait_type = 0; in dlm_recover_waiters_post()
5357 while (lkb->lkb_wait_count) { in dlm_recover_waiters_post()
5358 lkb->lkb_wait_count--; in dlm_recover_waiters_post()
5361 mutex_lock(&ls->ls_waiters_mutex); in dlm_recover_waiters_post()
5362 list_del_init(&lkb->lkb_wait_reply); in dlm_recover_waiters_post()
5363 mutex_unlock(&ls->ls_waiters_mutex); in dlm_recover_waiters_post()
5370 queue_cast(r, lkb, ou ? -DLM_EUNLOCK : in dlm_recover_waiters_post()
5371 -DLM_ECANCEL); in dlm_recover_waiters_post()
5376 queue_cast(r, lkb, -DLM_ECANCEL); in dlm_recover_waiters_post()
5378 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK; in dlm_recover_waiters_post()
5404 lkb->lkb_id, mstype, r->res_nodeid, in dlm_recover_waiters_post()
5427 if (lkb->lkb_recover_seq == ls->ls_recover_seq) in purge_mstcpy_list()
5440 struct dlm_ls *ls = r->res_ls; in dlm_purge_mstcpy_locks()
5442 purge_mstcpy_list(ls, r, &r->res_grantqueue); in dlm_purge_mstcpy_locks()
5443 purge_mstcpy_list(ls, r, &r->res_convertqueue); in dlm_purge_mstcpy_locks()
5444 purge_mstcpy_list(ls, r, &r->res_waitqueue); in dlm_purge_mstcpy_locks()
5457 if ((lkb->lkb_nodeid == nodeid_gone) || in purge_dead_list()
5458 dlm_is_removed(ls, lkb->lkb_nodeid)) { in purge_dead_list()
5462 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) && in purge_dead_list()
5463 (lkb->lkb_grmode >= DLM_LOCK_PW)) { in purge_dead_list()
5493 list_for_each_entry(memb, &ls->ls_nodes_gone, list) { in dlm_recover_purge()
5495 nodeid_gone = memb->nodeid; in dlm_recover_purge()
5501 down_write(&ls->ls_root_sem); in dlm_recover_purge()
5502 list_for_each_entry(r, &ls->ls_root_list, res_root_list) { in dlm_recover_purge()
5506 purge_dead_list(ls, r, &r->res_grantqueue, in dlm_recover_purge()
5508 purge_dead_list(ls, r, &r->res_convertqueue, in dlm_recover_purge()
5510 purge_dead_list(ls, r, &r->res_waitqueue, in dlm_recover_purge()
5517 up_write(&ls->ls_root_sem); in dlm_recover_purge()
5529 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5530 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { in find_grant_rsb()
5540 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5543 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5555 * Simplest would be to go through each master rsb and check for non-empty
5575 if (bucket == ls->ls_rsbtbl_size - 1) in dlm_recover_grant()
5604 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid) in search_remid_list()
5615 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid); in search_remid()
5618 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid); in search_remid()
5621 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid); in search_remid()
5631 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in receive_rcom_lock_args()
5633 lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in receive_rcom_lock_args()
5634 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); in receive_rcom_lock_args()
5635 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); in receive_rcom_lock_args()
5636 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); in receive_rcom_lock_args()
5637 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF; in receive_rcom_lock_args()
5638 lkb->lkb_flags |= DLM_IFL_MSTCPY; in receive_rcom_lock_args()
5639 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); in receive_rcom_lock_args()
5640 lkb->lkb_rqmode = rl->rl_rqmode; in receive_rcom_lock_args()
5641 lkb->lkb_grmode = rl->rl_grmode; in receive_rcom_lock_args()
5644 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; in receive_rcom_lock_args()
5645 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; in receive_rcom_lock_args()
5647 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_rcom_lock_args()
5648 int lvblen = le16_to_cpu(rc->rc_header.h_length) - in receive_rcom_lock_args()
5649 sizeof(struct dlm_rcom) - sizeof(struct rcom_lock); in receive_rcom_lock_args()
5650 if (lvblen > ls->ls_lvblen) in receive_rcom_lock_args()
5651 return -EINVAL; in receive_rcom_lock_args()
5652 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_rcom_lock_args()
5653 if (!lkb->lkb_lvbptr) in receive_rcom_lock_args()
5654 return -ENOMEM; in receive_rcom_lock_args()
5655 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); in receive_rcom_lock_args()
5662 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && in receive_rcom_lock_args()
5664 rl->rl_status = DLM_LKSTS_CONVERT; in receive_rcom_lock_args()
5665 lkb->lkb_grmode = DLM_LOCK_IV; in receive_rcom_lock_args()
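/* receive_rcom_lock_args() derives the LVB length from whatever trails
 * the two fixed structures in the rcom buffer, and rejects it if it
 * exceeds the lockspace's limit. A stand-alone model of that length
 * check; the sizes here are made up for the demo: */

#include <stdio.h>

int main(void)
{
	unsigned h_length  = 200; /* total length from the rcom header */
	unsigned fixed     = 168; /* sizeof(dlm_rcom) + sizeof(rcom_lock), illustrative */
	unsigned ls_lvblen = 32;  /* lockspace LVB size */

	unsigned lvblen = h_length - fixed;
	if (lvblen > ls_lvblen) {
		printf("reject: lvblen %u > %u (-EINVAL)\n", lvblen, ls_lvblen);
		return 1;
	}
	printf("copy %u LVB bytes\n", lvblen);
	return 0;
}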
5681 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_master_copy()
5685 int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in dlm_recover_master_copy()
5688 if (rl->rl_parent_lkid) { in dlm_recover_master_copy()
5689 error = -EOPNOTSUPP; in dlm_recover_master_copy()
5693 remid = le32_to_cpu(rl->rl_lkid); in dlm_recover_master_copy()
5703 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), in dlm_recover_master_copy()
5713 error = -EBADR; in dlm_recover_master_copy()
5719 error = -EEXIST; in dlm_recover_master_copy()
5734 add_lkb(r, lkb, rl->rl_status); in dlm_recover_master_copy()
5735 ls->ls_recover_locks_in++; in dlm_recover_master_copy()
5737 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) in dlm_recover_master_copy()
5742 saving in its process-copy lkb */ in dlm_recover_master_copy()
5743 rl->rl_remid = cpu_to_le32(lkb->lkb_id); in dlm_recover_master_copy()
5745 lkb->lkb_recover_seq = ls->ls_recover_seq; in dlm_recover_master_copy()
5751 if (error && error != -EEXIST) in dlm_recover_master_copy()
5754 rl->rl_result = cpu_to_le32(error); in dlm_recover_master_copy()
5761 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_process_copy()
5767 lkid = le32_to_cpu(rl->rl_lkid); in dlm_recover_process_copy()
5768 remid = le32_to_cpu(rl->rl_remid); in dlm_recover_process_copy()
5769 result = le32_to_cpu(rl->rl_result); in dlm_recover_process_copy()
5774 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5779 r = lkb->lkb_resource; in dlm_recover_process_copy()
5785 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5791 return -EINVAL; in dlm_recover_process_copy()
5795 case -EBADR: in dlm_recover_process_copy()
5801 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5806 case -EEXIST: in dlm_recover_process_copy()
5808 lkb->lkb_remid = remid; in dlm_recover_process_copy()
5812 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5852 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_request()
5853 if (!ua->lksb.sb_lvbptr) { in dlm_user_request()
5855 error = -ENOMEM; in dlm_user_request()
5860 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, in dlm_user_request()
5863 error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua, in dlm_user_request()
5867 kfree(ua->lksb.sb_lvbptr); in dlm_user_request()
5868 ua->lksb.sb_lvbptr = NULL; in dlm_user_request()
5876 lkb->lkb_flags |= DLM_IFL_USER; in dlm_user_request()
5882 case -EINPROGRESS: in dlm_user_request()
5885 case -EAGAIN: in dlm_user_request()
5892 /* add this new lkb to the per-process list of locks */ in dlm_user_request()
5893 spin_lock(&ua->proc->locks_spin); in dlm_user_request()
5895 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_request()
5896 spin_unlock(&ua->proc->locks_spin); in dlm_user_request()
5932 ua = lkb->lkb_ua; in dlm_user_convert()
5934 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { in dlm_user_convert()
5935 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_convert()
5936 if (!ua->lksb.sb_lvbptr) { in dlm_user_convert()
5937 error = -ENOMEM; in dlm_user_convert()
5941 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_convert()
5942 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_convert()
5944 ua->xid = ua_tmp->xid; in dlm_user_convert()
5945 ua->castparam = ua_tmp->castparam; in dlm_user_convert()
5946 ua->castaddr = ua_tmp->castaddr; in dlm_user_convert()
5947 ua->bastparam = ua_tmp->bastparam; in dlm_user_convert()
5948 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_convert()
5949 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_convert()
5952 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, in dlm_user_convert()
5955 error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua, in dlm_user_convert()
5963 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK) in dlm_user_convert()
5989 mutex_lock(&ls->ls_orphans_mutex); in dlm_user_adopt_orphan()
5990 list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { in dlm_user_adopt_orphan()
5991 if (iter->lkb_resource->res_length != namelen) in dlm_user_adopt_orphan()
5993 if (memcmp(iter->lkb_resource->res_name, name, namelen)) in dlm_user_adopt_orphan()
5995 if (iter->lkb_grmode != mode) { in dlm_user_adopt_orphan()
6001 list_del_init(&iter->lkb_ownqueue); in dlm_user_adopt_orphan()
6002 iter->lkb_flags &= ~DLM_IFL_ORPHAN; in dlm_user_adopt_orphan()
6003 *lkid = iter->lkb_id; in dlm_user_adopt_orphan()
6006 mutex_unlock(&ls->ls_orphans_mutex); in dlm_user_adopt_orphan()
6009 rv = -EAGAIN; in dlm_user_adopt_orphan()
6014 rv = -ENOENT; in dlm_user_adopt_orphan()
6018 lkb->lkb_exflags = flags; in dlm_user_adopt_orphan()
6019 lkb->lkb_ownpid = (int) current->pid; in dlm_user_adopt_orphan()
6021 ua = lkb->lkb_ua; in dlm_user_adopt_orphan()
6023 ua->proc = ua_tmp->proc; in dlm_user_adopt_orphan()
6024 ua->xid = ua_tmp->xid; in dlm_user_adopt_orphan()
6025 ua->castparam = ua_tmp->castparam; in dlm_user_adopt_orphan()
6026 ua->castaddr = ua_tmp->castaddr; in dlm_user_adopt_orphan()
6027 ua->bastparam = ua_tmp->bastparam; in dlm_user_adopt_orphan()
6028 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_adopt_orphan()
6029 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_adopt_orphan()
6037 spin_lock(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
6038 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_adopt_orphan()
6039 spin_unlock(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
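/* dlm_user_adopt_orphan() walks ls_orphans comparing resource name length,
 * name bytes, and granted mode; "found but wrong mode" (-EAGAIN) is
 * reported distinctly from "not found" (-ENOENT). A toy version of that
 * scan with illustrative types: */

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct orphan { const char *name; int grmode; unsigned id; };

static int adopt(const struct orphan *list, int n,
		 const char *name, int mode, unsigned *lkid)
{
	for (int i = 0; i < n; i++) {
		if (strcmp(list[i].name, name))
			continue;
		if (list[i].grmode != mode)
			return -EAGAIN; /* right lock, wrong mode */
		*lkid = list[i].id;
		return 0;
	}
	return -ENOENT;
}

int main(void)
{
	const struct orphan orphans[] = {
		{ "resA", 5, 0x101 }, { "resB", 3, 0x102 },
	};
	unsigned lkid;

	if (!adopt(orphans, 2, "resA", 5, &lkid))
		printf("adopted lkid 0x%x\n", lkid);
	return 0;
}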
6061 ua = lkb->lkb_ua; in dlm_user_unlock()
6063 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_unlock()
6064 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_unlock()
6065 if (ua_tmp->castparam) in dlm_user_unlock()
6066 ua->castparam = ua_tmp->castparam; in dlm_user_unlock()
6067 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_unlock()
6075 if (error == -DLM_EUNLOCK) in dlm_user_unlock()
6078 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK)) in dlm_user_unlock()
6083 spin_lock(&ua->proc->locks_spin); in dlm_user_unlock()
6085 if (!list_empty(&lkb->lkb_ownqueue)) in dlm_user_unlock()
6086 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); in dlm_user_unlock()
6087 spin_unlock(&ua->proc->locks_spin); in dlm_user_unlock()
6113 ua = lkb->lkb_ua; in dlm_user_cancel()
6114 if (ua_tmp->castparam) in dlm_user_cancel()
6115 ua->castparam = ua_tmp->castparam; in dlm_user_cancel()
6116 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_cancel()
6124 if (error == -DLM_ECANCEL) in dlm_user_cancel()
6127 if (error == -EBUSY) in dlm_user_cancel()
6154 ua = lkb->lkb_ua; in dlm_user_deadlock()
6162 r = lkb->lkb_resource; in dlm_user_deadlock()
6169 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL; in dlm_user_deadlock()
6176 if (error == -DLM_ECANCEL) in dlm_user_deadlock()
6179 if (error == -EBUSY) in dlm_user_deadlock()
6198 mutex_lock(&ls->ls_orphans_mutex); in orphan_proc_lock()
6199 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); in orphan_proc_lock()
6200 mutex_unlock(&ls->ls_orphans_mutex); in orphan_proc_lock()
6202 set_unlock_args(0, lkb->lkb_ua, &args); in orphan_proc_lock()
6205 if (error == -DLM_ECANCEL) in orphan_proc_lock()
6221 lkb->lkb_ua, &args); in unlock_proc_lock()
6224 if (error == -DLM_EUNLOCK) in unlock_proc_lock()
6238 spin_lock(&ls->ls_clear_proc_locks); in del_proc_lock()
6239 if (list_empty(&proc->locks)) in del_proc_lock()
6242 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); in del_proc_lock()
6243 list_del_init(&lkb->lkb_ownqueue); in del_proc_lock()
6245 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in del_proc_lock()
6246 lkb->lkb_flags |= DLM_IFL_ORPHAN; in del_proc_lock()
6248 lkb->lkb_flags |= DLM_IFL_DEAD; in del_proc_lock()
6250 spin_unlock(&ls->ls_clear_proc_locks); in del_proc_lock()
6255 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6258 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6259 list, and no more device_writes should add lkb's to proc->locks list; so we
6260 shouldn't need to take asts_spin or locks_spin here.  this assumes that
6261 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6262 them ourself if they arrive at the same time on different threads */
6275 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in dlm_clear_proc_locks()
6280 /* this removes the reference for the proc->locks list in dlm_clear_proc_locks()
6287 spin_lock(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
6289 /* in-progress unlocks */ in dlm_clear_proc_locks()
6290 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in dlm_clear_proc_locks()
6291 list_del_init(&lkb->lkb_ownqueue); in dlm_clear_proc_locks()
6292 lkb->lkb_flags |= DLM_IFL_DEAD; in dlm_clear_proc_locks()
6296 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { in dlm_clear_proc_locks()
6297 memset(&lkb->lkb_callbacks, 0, in dlm_clear_proc_locks()
6299 list_del_init(&lkb->lkb_cb_list); in dlm_clear_proc_locks()
6303 spin_unlock(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
6313 spin_lock(&proc->locks_spin); in purge_proc_locks()
6314 if (!list_empty(&proc->locks)) { in purge_proc_locks()
6315 lkb = list_entry(proc->locks.next, struct dlm_lkb, in purge_proc_locks()
6317 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6319 spin_unlock(&proc->locks_spin); in purge_proc_locks()
6324 lkb->lkb_flags |= DLM_IFL_DEAD; in purge_proc_locks()
6326 dlm_put_lkb(lkb); /* ref from proc->locks list */ in purge_proc_locks()
6329 spin_lock(&proc->locks_spin); in purge_proc_locks()
6330 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in purge_proc_locks()
6331 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6332 lkb->lkb_flags |= DLM_IFL_DEAD; in purge_proc_locks()
6335 spin_unlock(&proc->locks_spin); in purge_proc_locks()
6337 spin_lock(&proc->asts_spin); in purge_proc_locks()
6338 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { in purge_proc_locks()
6339 memset(&lkb->lkb_callbacks, 0, in purge_proc_locks()
6341 list_del_init(&lkb->lkb_cb_list); in purge_proc_locks()
6344 spin_unlock(&proc->asts_spin); in purge_proc_locks()
6353 mutex_lock(&ls->ls_orphans_mutex); in do_purge()
6354 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { in do_purge()
6355 if (pid && lkb->lkb_ownpid != pid) in do_purge()
6358 list_del_init(&lkb->lkb_ownqueue); in do_purge()
6361 mutex_unlock(&ls->ls_orphans_mutex); in do_purge()
6366 struct dlm_message *ms; in send_purge() local
6371 DLM_MSG_PURGE, &ms, &mh); in send_purge()
6374 ms->m_nodeid = cpu_to_le32(nodeid); in send_purge()
6375 ms->m_pid = cpu_to_le32(pid); in send_purge()
6377 return send_message(mh, ms); in send_purge()
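/* A purge either runs locally through do_purge() or is wrapped in a
 * DLM_MSG_PURGE carrying the target nodeid and pid, as send_purge()
 * above shows. A minimal model of that local/remote split; the names
 * below are stand-ins for the kernel's: */

#include <stdio.h>

static void do_purge_demo(int nodeid, int pid)
{
	printf("purging orphans of pid %d for node %d locally\n", pid, nodeid);
}

static void send_purge_demo(int nodeid, int pid)
{
	printf("sending PURGE(nodeid=%d, pid=%d) to remote node\n", nodeid, pid);
}

static void user_purge(int our_nodeid, int nodeid, int pid)
{
	if (nodeid && nodeid != our_nodeid)
		send_purge_demo(nodeid, pid); /* another node owns them */
	else
		do_purge_demo(nodeid, pid);   /* our own orphans */
}

int main(void)
{
	user_purge(1, 2, 1234); /* remote */
	user_purge(1, 1, 1234); /* local */
	return 0;
}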
6389 if (pid == current->pid) in dlm_user_purge()
6409 return -EOPNOTSUPP; in dlm_debug_add_lkb()
6413 return -ENOMEM; in dlm_debug_add_lkb()
6421 lkb->lkb_flags = lkb_flags; in dlm_debug_add_lkb()
6422 lkb->lkb_nodeid = lkb_nodeid; in dlm_debug_add_lkb()
6423 lkb->lkb_lksb = lksb; in dlm_debug_add_lkb()
6426 lkb->lkb_astparam = (void *)0xDEADBEEF; in dlm_debug_add_lkb()
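/* dlm_debug_add_lkb() sets lkb_astparam to 0xDEADBEEF, the classic poison
 * value: never a valid allocation, so any stray dereference of a
 * debug-injected lkb's callback argument faults loudly and is easy to
 * spot in a crash dump. The idiom in isolation: */

#include <stdio.h>

int main(void)
{
	void *astparam = (void *)0xDEADBEEF; /* obvious poison, not a real pointer */

	printf("poisoned callback arg: %p\n", astparam);
	return 0;
}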