Lines matching refs: mle

Identifier cross-reference for mle (struct dlm_master_list_entry) in the OCFS2 DLM master code, fs/ocfs2/dlm/dlmmaster.c in the Linux kernel. Each entry shows the source line number, the matching line, and the enclosing function; "argument" marks lines where mle is a function parameter and "local" where it is declared as a local variable.
41 struct dlm_master_list_entry *mle,
45 struct dlm_master_list_entry *mle,
56 struct dlm_master_list_entry *mle, in dlm_mle_equal() argument
60 if (dlm != mle->dlm) in dlm_mle_equal()
63 if (namelen != mle->mnamelen || in dlm_mle_equal()
64 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
75 static void dlm_init_mle(struct dlm_master_list_entry *mle,
81 static void dlm_put_mle(struct dlm_master_list_entry *mle);
82 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
84 struct dlm_master_list_entry **mle,
88 struct dlm_master_list_entry *mle, int to);
93 struct dlm_master_list_entry *mle,
97 struct dlm_master_list_entry *mle,
101 struct dlm_master_list_entry *mle,
164 struct dlm_master_list_entry *mle) in __dlm_mle_attach_hb_events() argument
168 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
173 struct dlm_master_list_entry *mle) in __dlm_mle_detach_hb_events() argument
175 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
176 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
181 struct dlm_master_list_entry *mle) in dlm_mle_detach_hb_events() argument
184 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_detach_hb_events()
188 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) in dlm_get_mle_inuse() argument
191 dlm = mle->dlm; in dlm_get_mle_inuse()
195 mle->inuse++; in dlm_get_mle_inuse()
196 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
199 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) in dlm_put_mle_inuse() argument
202 dlm = mle->dlm; in dlm_put_mle_inuse()
206 mle->inuse--; in dlm_put_mle_inuse()
207 __dlm_put_mle(mle); in dlm_put_mle_inuse()
214 static void __dlm_put_mle(struct dlm_master_list_entry *mle) in __dlm_put_mle() argument
217 dlm = mle->dlm; in __dlm_put_mle()
221 if (!kref_read(&mle->mle_refs)) { in __dlm_put_mle()
224 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
225 dlm_print_one_mle(mle); in __dlm_put_mle()
228 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
233 static void dlm_put_mle(struct dlm_master_list_entry *mle) in dlm_put_mle() argument
236 dlm = mle->dlm; in dlm_put_mle()
240 __dlm_put_mle(mle); in dlm_put_mle()
245 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) in dlm_get_mle() argument
247 kref_get(&mle->mle_refs); in dlm_get_mle()
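
The helpers above give an mle two layers of reference counting: the kref mle_refs (dlm_get_mle()/dlm_put_mle(), released through dlm_mle_release()) and an inuse count stacked on top by dlm_get_mle_inuse()/dlm_put_mle_inuse() to pin the entry across blocking operations. A minimal sketch of how the pair is used, built only from the calls shown in this listing; the example_ function name is hypothetical and the snippet is not compilable outside fs/ocfs2/dlm:

/* Sketch: pin an mle across a blocking step, then release it.
 * dlm_get_mle_inuse() bumps both mle->inuse and the kref;
 * dlm_put_mle_inuse() drops inuse and calls __dlm_put_mle(), which
 * frees the entry via dlm_mle_release() on the final kref_put(). */
static void example_mle_pin(struct dlm_master_list_entry *mle)
{
	dlm_get_mle_inuse(mle);		/* mle->inuse++, kref_get(&mle->mle_refs) */

	/* ... send master requests, wait for responses ... */

	dlm_put_mle_inuse(mle);		/* mle->inuse--, __dlm_put_mle() */
}
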
250 static void dlm_init_mle(struct dlm_master_list_entry *mle, in dlm_init_mle() argument
259 mle->dlm = dlm; in dlm_init_mle()
260 mle->type = type; in dlm_init_mle()
261 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
262 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
263 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_init_mle()
264 spin_lock_init(&mle->spinlock); in dlm_init_mle()
265 init_waitqueue_head(&mle->wq); in dlm_init_mle()
266 atomic_set(&mle->woken, 0); in dlm_init_mle()
267 kref_init(&mle->mle_refs); in dlm_init_mle()
268 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_init_mle()
269 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
270 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
271 mle->inuse = 0; in dlm_init_mle()
273 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
274 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
275 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
277 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
279 mle->mleres = res; in dlm_init_mle()
280 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
281 mle->mnamelen = res->lockname.len; in dlm_init_mle()
282 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
285 mle->mleres = NULL; in dlm_init_mle()
286 memcpy(mle->mname, name, namelen); in dlm_init_mle()
287 mle->mnamelen = namelen; in dlm_init_mle()
288 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
291 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
292 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
295 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); in dlm_init_mle()
296 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); in dlm_init_mle()
297 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
298 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
301 __dlm_mle_attach_hb_events(dlm, mle); in dlm_init_mle()
304 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_unlink_mle() argument
309 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
310 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
313 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_insert_mle() argument
319 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
320 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
325 struct dlm_master_list_entry **mle, in dlm_find_mle() argument
340 *mle = tmpmle; in dlm_find_mle()
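
__dlm_insert_mle() hashes an mle by mle->mnamehash into the per-domain master hash, and dlm_find_mle() walks the same bucket comparing candidates with dlm_mle_equal() (name length plus memcmp of mname, lines 56-64). A sketch of the find-or-insert pattern that dlm_get_lock_resource() uses further down, assuming the caller already holds dlm->spinlock and dlm->master_lock (implied by the double-underscore convention; the locking lines are not part of this listing) and that dlm, res, lockid, namelen and a preallocated alloc_mle are in scope:

/* Sketch only, not compilable standalone. */
blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
if (!blocked) {
	/* nobody is mastering this name yet: use the preallocated mle */
	mle = alloc_mle;
	dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
	set_bit(dlm->node_num, mle->maybe_map);	/* this node may become master */
	__dlm_insert_mle(dlm, mle);		/* hashed by mle->mnamehash */
}
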
348 struct dlm_master_list_entry *mle; in dlm_hb_event_notify_attached() local
352 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
354 dlm_mle_node_up(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
356 dlm_mle_node_down(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
361 struct dlm_master_list_entry *mle, in dlm_mle_node_down() argument
364 spin_lock(&mle->spinlock); in dlm_mle_node_down()
366 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
369 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
371 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
375 struct dlm_master_list_entry *mle, in dlm_mle_node_up() argument
378 spin_lock(&mle->spinlock); in dlm_mle_node_up()
380 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
383 set_bit(idx, mle->node_map); in dlm_mle_node_up()
385 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
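
dlm_init_mle() (lines 250-301) zeroes maybe_map and response_map and copies the domain membership into node_map and vote_map with the local node's bit cleared; from their uses in this listing, node_map tracks which nodes are alive, vote_map is the set of nodes polled for mastery, response_map records which of them have answered, and maybe_map marks nodes that might end up as master. node_map is kept current through the heartbeat hooks above, roughly as in this sketch built from the calls shown here (idx and node_up are assumed to come from the heartbeat callback; not compilable standalone):

__dlm_mle_attach_hb_events(dlm, mle);	/* list_add_tail onto dlm->mle_hb_events */

/* later, from dlm_hb_event_notify_attached() on a heartbeat event: */
list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
	if (node_up)
		dlm_mle_node_up(dlm, mle, NULL, idx);	/* set_bit(idx, mle->node_map) */
	else
		dlm_mle_node_down(dlm, mle, NULL, idx);	/* clear_bit(idx, mle->node_map) */
}

/* when the mle is finished with: */
dlm_mle_detach_hb_events(dlm, mle);	/* list_del_init(&mle->hb_events) */
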
407 struct dlm_master_list_entry *mle; in dlm_mle_release() local
410 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); in dlm_mle_release()
411 dlm = mle->dlm; in dlm_mle_release()
416 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
417 mle->type); in dlm_mle_release()
420 __dlm_unlink_mle(dlm, mle); in dlm_mle_release()
423 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_release()
425 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
429 kmem_cache_free(dlm_mle_cache, mle); in dlm_mle_release()
709 struct dlm_master_list_entry *mle = NULL; in dlm_get_lock_resource() local
816 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); in dlm_get_lock_resource()
819 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
823 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
832 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
833 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
844 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
845 dlm_put_mle(mle); in dlm_get_lock_resource()
846 mle = NULL; in dlm_get_lock_resource()
855 mle = alloc_mle; in dlm_get_lock_resource()
858 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
859 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
860 __dlm_insert_mle(dlm, mle); in dlm_get_lock_resource()
890 dlm_get_mle_inuse(mle); in dlm_get_lock_resource()
936 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
938 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
941 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
943 if (mle->master <= nodenum) in dlm_get_lock_resource()
951 lockid, nodenum, mle->master); in dlm_get_lock_resource()
957 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
969 dlm_print_one_mle(mle); in dlm_get_lock_resource()
981 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
982 dlm_put_mle(mle); in dlm_get_lock_resource()
984 dlm_put_mle_inuse(mle); in dlm_get_lock_resource()
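
The dlm_get_lock_resource() excerpts above show the overall mastery sequence: reuse or create an mle, pin it, poll every node in vote_map with dlm_do_master_request(), then block in dlm_wait_for_lock_mastery() until ownership is settled. Condensed into a sketch (error handling, relocking, and the recovery and migration special cases of the real function are omitted; dlm_node_iter_next() is assumed from the dlm node-iterator API; not compilable standalone):

dlm_get_mle_inuse(mle);			/* keep the mle alive while blocking */

dlm_node_iter_init(mle->vote_map, &iter);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
	ret = dlm_do_master_request(res, mle, nodenum);
	if (mle->master != O2NM_MAX_NODES)
		break;			/* some node already owns this lockres */
}

ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);

/* done: drop the heartbeat hook and both references */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
dlm_put_mle_inuse(mle);
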
1005 struct dlm_master_list_entry *mle, in dlm_wait_for_lock_mastery() argument
1026 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1039 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1040 m = mle->master; in dlm_wait_for_lock_mastery()
1041 map_changed = (memcmp(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1042 sizeof(mle->vote_map)) != 0); in dlm_wait_for_lock_mastery()
1043 voting_done = (memcmp(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1044 sizeof(mle->vote_map)) == 0); in dlm_wait_for_lock_mastery()
1051 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1052 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1059 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1084 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_wait_for_lock_mastery()
1089 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1100 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1105 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1106 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1107 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1124 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
1210 struct dlm_master_list_entry *mle, in dlm_restart_lock_mastery() argument
1221 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1223 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1234 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1235 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1239 int lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1243 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1249 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1274 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1275 mle->mleres = res; in dlm_restart_lock_mastery()
1282 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_restart_lock_mastery()
1283 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_restart_lock_mastery()
1285 memcpy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1286 sizeof(mle->node_map)); in dlm_restart_lock_mastery()
1288 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1289 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1309 struct dlm_master_list_entry *mle, int to) in dlm_do_master_request() argument
1311 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1318 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1320 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1321 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1354 spin_lock(&mle->spinlock); in dlm_do_master_request()
1357 set_bit(to, mle->response_map); in dlm_do_master_request()
1362 mle->master = to; in dlm_do_master_request()
1366 set_bit(to, mle->response_map); in dlm_do_master_request()
1370 set_bit(to, mle->response_map); in dlm_do_master_request()
1371 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1382 spin_unlock(&mle->spinlock); in dlm_do_master_request()
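
The switch that follows each master request (lines 1354-1382) folds the answer from the polled node into the mle bitmaps under mle->spinlock. Reconstructed from the bit operations shown above; the DLM_MASTER_RESP_* names are taken from the ocfs2 dlm headers and the exact case bodies should be read as a sketch:

spin_lock(&mle->spinlock);
switch (response) {
case DLM_MASTER_RESP_YES:	/* the node is (or will be) the master */
	set_bit(to, mle->response_map);
	mle->master = to;
	break;
case DLM_MASTER_RESP_NO:	/* not the master, not contending */
	set_bit(to, mle->response_map);
	break;
case DLM_MASTER_RESP_MAYBE:	/* also trying to master this name */
	set_bit(to, mle->response_map);
	set_bit(to, mle->maybe_map);
	break;
}
spin_unlock(&mle->spinlock);
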
1409 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; in dlm_master_request_handler() local
1460 if (mle) in dlm_master_request_handler()
1461 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1469 if (mle) in dlm_master_request_handler()
1470 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1484 if (mle) in dlm_master_request_handler()
1485 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1549 if (mle) in dlm_master_request_handler()
1550 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1565 if (!mle) { in dlm_master_request_handler()
1569 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_master_request_handler()
1570 if (!mle) { in dlm_master_request_handler()
1580 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); in dlm_master_request_handler()
1581 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1582 __dlm_insert_mle(dlm, mle); in dlm_master_request_handler()
1677 struct dlm_master_list_entry *mle = NULL; in dlm_do_assert_master() local
1708 if (dlm_find_mle(dlm, &mle, (char *)lockname, in dlm_do_assert_master()
1710 dlm_print_one_mle(mle); in dlm_do_assert_master()
1711 __dlm_put_mle(mle); in dlm_do_assert_master()
1765 struct dlm_master_list_entry *mle = NULL; in dlm_assert_master_handler() local
1794 if (!dlm_find_mle(dlm, &mle, name, namelen)) { in dlm_assert_master_handler()
1800 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_assert_master_handler()
1823 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1834 __dlm_put_mle(mle); in dlm_assert_master_handler()
1853 if (!mle) { in dlm_assert_master_handler()
1863 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1888 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1892 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1893 mle->master, namelen, name); in dlm_assert_master_handler()
1904 if (mle) { in dlm_assert_master_handler()
1909 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1910 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1916 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1924 mle->master = assert->node_idx; in dlm_assert_master_handler()
1925 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1926 wake_up(&mle->wq); in dlm_assert_master_handler()
1927 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1932 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1936 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1939 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1942 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1955 rr = kref_read(&mle->mle_refs); in dlm_assert_master_handler()
1956 if (mle->inuse > 0) { in dlm_assert_master_handler()
1971 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1972 dlm_print_one_mle(mle); in dlm_assert_master_handler()
1974 __dlm_unlink_mle(dlm, mle); in dlm_assert_master_handler()
1975 __dlm_mle_detach_hb_events(dlm, mle); in dlm_assert_master_handler()
1976 __dlm_put_mle(mle); in dlm_assert_master_handler()
1982 __dlm_put_mle(mle); in dlm_assert_master_handler()
2028 if (mle) in dlm_assert_master_handler()
2029 __dlm_put_mle(mle); in dlm_assert_master_handler()
2545 struct dlm_master_list_entry *mle = NULL; in dlm_migrate_lockres() local
2571 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_lockres()
2572 if (!mle) { in dlm_migrate_lockres()
2584 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2591 dlm_get_mle_inuse(mle); in dlm_migrate_lockres()
2626 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2627 dlm_put_mle(mle); in dlm_migrate_lockres()
2628 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2629 } else if (mle) { in dlm_migrate_lockres()
2630 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_lockres()
2631 mle = NULL; in dlm_migrate_lockres()
2657 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2658 dlm_put_mle(mle); in dlm_migrate_lockres()
2659 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2683 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2684 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2688 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2703 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2704 dlm_put_mle(mle); in dlm_migrate_lockres()
2705 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2726 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2727 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
3107 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; in dlm_migrate_request_handler() local
3120 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_request_handler()
3122 if (!mle) { in dlm_migrate_request_handler()
3139 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3149 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3155 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3183 struct dlm_master_list_entry *mle, in dlm_add_migration_mle() argument
3242 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3243 mle->new_master = new_master; in dlm_add_migration_mle()
3246 mle->master = master; in dlm_add_migration_mle()
3248 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3249 __dlm_insert_mle(dlm, mle); in dlm_add_migration_mle()
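
dlm_add_migration_mle() is what gives a migration mle its meaning: type DLM_MLE_MIGRATION, mle->master holding the current owner, mle->new_master the migration target, and the target's bit preset in maybe_map before the entry is hashed in. A condensed sketch grounded in the lines above (locking and the handling of a pre-existing entry through *oldmle are omitted; not compilable standalone):

/* Record an in-flight migration of res from 'master' to 'new_master'
 * so concurrent master requests see the migration in progress. */
dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
mle->new_master = new_master;
mle->master = master;			/* current owner of the lockres */
set_bit(new_master, mle->maybe_map);
__dlm_insert_mle(dlm, mle);
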
3258 struct dlm_master_list_entry *mle) in dlm_reset_mleres_owner() argument
3263 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3264 mle->mnamehash); in dlm_reset_mleres_owner()
3276 __dlm_mle_detach_hb_events(dlm, mle); in dlm_reset_mleres_owner()
3280 __dlm_put_mle(mle); in dlm_reset_mleres_owner()
3288 struct dlm_master_list_entry *mle) in dlm_clean_migration_mle() argument
3290 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_migration_mle()
3292 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3293 __dlm_unlink_mle(dlm, mle); in dlm_clean_migration_mle()
3294 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3295 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3297 wake_up(&mle->wq); in dlm_clean_migration_mle()
3301 struct dlm_master_list_entry *mle, u8 dead_node) in dlm_clean_block_mle() argument
3305 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3307 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3308 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_clean_block_mle()
3312 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3319 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3320 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3321 wake_up(&mle->wq); in dlm_clean_block_mle()
3324 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_block_mle()
3325 __dlm_put_mle(mle); in dlm_clean_block_mle()
3331 struct dlm_master_list_entry *mle; in dlm_clean_master_list() local
3345 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3346 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3347 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3348 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3353 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3359 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3360 dlm_clean_block_mle(dlm, mle, dead_node); in dlm_clean_master_list()
3375 if (mle->master != dead_node && in dlm_clean_master_list()
3376 mle->new_master != dead_node) in dlm_clean_master_list()
3379 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3384 mle->master); in dlm_clean_master_list()
3390 dlm_clean_migration_mle(dlm, mle); in dlm_clean_master_list()
3393 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3394 mle->new_master); in dlm_clean_master_list()
3401 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3407 __dlm_put_mle(mle); in dlm_clean_master_list()
3534 struct dlm_master_list_entry *mle; in dlm_force_free_mles() local
3551 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_force_free_mles()
3552 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3553 mlog(ML_ERROR, "bad mle: %p\n", mle); in dlm_force_free_mles()
3554 dlm_print_one_mle(mle); in dlm_force_free_mles()
3556 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3557 wake_up(&mle->wq); in dlm_force_free_mles()
3559 __dlm_unlink_mle(dlm, mle); in dlm_force_free_mles()
3560 __dlm_mle_detach_hb_events(dlm, mle); in dlm_force_free_mles()
3561 __dlm_put_mle(mle); in dlm_force_free_mles()