Lines matching refs: mle (fs/ocfs2/dlm/dlmmaster.c; each hit is prefixed with its line number in that file)

56 			      struct dlm_master_list_entry *mle,
60 struct dlm_master_list_entry *mle,
71 struct dlm_master_list_entry *mle, in dlm_mle_equal() argument
75 if (dlm != mle->dlm) in dlm_mle_equal()
78 if (namelen != mle->mnamelen || in dlm_mle_equal()
79 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
99 struct dlm_master_list_entry **mle,
103 struct dlm_master_list_entry *mle, int to);
108 struct dlm_master_list_entry *mle,
112 struct dlm_master_list_entry *mle,
116 struct dlm_master_list_entry *mle,
179 struct dlm_master_list_entry *mle) in __dlm_mle_attach_hb_events() argument
183 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
188 struct dlm_master_list_entry *mle) in __dlm_mle_detach_hb_events() argument
190 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
191 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
196 struct dlm_master_list_entry *mle) in dlm_mle_detach_hb_events() argument
199 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_detach_hb_events()
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) in dlm_get_mle_inuse() argument
206 dlm = mle->dlm; in dlm_get_mle_inuse()
210 mle->inuse++; in dlm_get_mle_inuse()
211 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) in dlm_put_mle_inuse() argument
217 dlm = mle->dlm; in dlm_put_mle_inuse()
221 mle->inuse--; in dlm_put_mle_inuse()
222 __dlm_put_mle(mle); in dlm_put_mle_inuse()
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle) in __dlm_put_mle() argument
232 dlm = mle->dlm; in __dlm_put_mle()
236 if (!kref_read(&mle->mle_refs)) { in __dlm_put_mle()
239 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
240 dlm_print_one_mle(mle); in __dlm_put_mle()
243 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
248 static void dlm_put_mle(struct dlm_master_list_entry *mle) in dlm_put_mle() argument
251 dlm = mle->dlm; in dlm_put_mle()
255 __dlm_put_mle(mle); in dlm_put_mle()
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) in dlm_get_mle() argument
262 kref_get(&mle->mle_refs); in dlm_get_mle()
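
These fragments give the mle lifetime rules: mle_refs (a kref) is the real reference count, released through dlm_mle_release(), while inuse pins an entry that a caller is still working on outside the hash locks. A minimal sketch of the pin/unpin pattern a caller such as dlm_get_lock_resource() appears to follow (the surrounding dlm->spinlock/master_lock locking is my assumption, it is not visible in the hits above):

	/* Sketch only, not verbatim kernel code. */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);	/* assumed: mle hash and refs are guarded by these */
	dlm_get_mle_inuse(mle);		/* mle->inuse++ and kref_get(&mle->mle_refs) */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* ... use the mle without the locks held ... */

	dlm_put_mle_inuse(mle);		/* mle->inuse-- and __dlm_put_mle() under the same locks */
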
265 static void dlm_init_mle(struct dlm_master_list_entry *mle, in dlm_init_mle() argument
274 mle->dlm = dlm; in dlm_init_mle()
275 mle->type = type; in dlm_init_mle()
276 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
277 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
278 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_init_mle()
279 spin_lock_init(&mle->spinlock); in dlm_init_mle()
280 init_waitqueue_head(&mle->wq); in dlm_init_mle()
281 atomic_set(&mle->woken, 0); in dlm_init_mle()
282 kref_init(&mle->mle_refs); in dlm_init_mle()
283 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_init_mle()
284 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
285 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
286 mle->inuse = 0; in dlm_init_mle()
288 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
289 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
290 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
292 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
294 mle->mleres = res; in dlm_init_mle()
295 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
296 mle->mnamelen = res->lockname.len; in dlm_init_mle()
297 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
300 mle->mleres = NULL; in dlm_init_mle()
301 memcpy(mle->mname, name, namelen); in dlm_init_mle()
302 mle->mnamelen = namelen; in dlm_init_mle()
303 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
306 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
307 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
310 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); in dlm_init_mle()
311 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); in dlm_init_mle()
312 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
313 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
316 __dlm_mle_attach_hb_events(dlm, mle); in dlm_init_mle()
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_unlink_mle() argument
324 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
325 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) in __dlm_insert_mle() argument
334 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
335 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
340 struct dlm_master_list_entry **mle, in dlm_find_mle() argument
355 *mle = tmpmle; in dlm_find_mle()
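
Combined with dlm_init_mle() above, the insert and lookup helpers show the usual creation path, which dlm_get_lock_resource() (lines 870-875 below) follows almost verbatim. A hedged sketch of that sequence, assuming the dlm_mle_cache slab and locking conventions seen elsewhere in this listing; dlm and res are taken from the caller's context:

	/* Sketch, not a drop-in replacement for the real caller. */
	struct dlm_master_list_entry *mle;

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle)
		return -ENOMEM;			/* real callers report this differently */

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);		/* assumed lock for the master hash */
	dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
	set_bit(dlm->node_num, mle->maybe_map);	/* this node is a mastery candidate */
	__dlm_insert_mle(dlm, mle);		/* hashed by mle->mnamehash */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
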
363 struct dlm_master_list_entry *mle; in dlm_hb_event_notify_attached() local
367 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
369 dlm_mle_node_up(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
371 dlm_mle_node_down(dlm, mle, NULL, idx); in dlm_hb_event_notify_attached()
376 struct dlm_master_list_entry *mle, in dlm_mle_node_down() argument
379 spin_lock(&mle->spinlock); in dlm_mle_node_down()
381 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
384 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
386 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
390 struct dlm_master_list_entry *mle, in dlm_mle_node_up() argument
393 spin_lock(&mle->spinlock); in dlm_mle_node_up()
395 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
398 set_bit(idx, mle->node_map); in dlm_mle_node_up()
400 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
422 struct dlm_master_list_entry *mle; in dlm_mle_release() local
425 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); in dlm_mle_release()
426 dlm = mle->dlm; in dlm_mle_release()
431 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
432 mle->type); in dlm_mle_release()
435 __dlm_unlink_mle(dlm, mle); in dlm_mle_release()
438 __dlm_mle_detach_hb_events(dlm, mle); in dlm_mle_release()
440 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
444 kmem_cache_free(dlm_mle_cache, mle); in dlm_mle_release()
724 struct dlm_master_list_entry *mle = NULL; in dlm_get_lock_resource() local
831 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); in dlm_get_lock_resource()
834 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
838 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
847 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
848 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
859 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
860 dlm_put_mle(mle); in dlm_get_lock_resource()
861 mle = NULL; in dlm_get_lock_resource()
870 mle = alloc_mle; in dlm_get_lock_resource()
873 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
874 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
875 __dlm_insert_mle(dlm, mle); in dlm_get_lock_resource()
905 dlm_get_mle_inuse(mle); in dlm_get_lock_resource()
951 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
953 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
956 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
958 if (mle->master <= nodenum) in dlm_get_lock_resource()
966 lockid, nodenum, mle->master); in dlm_get_lock_resource()
972 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
984 dlm_print_one_mle(mle); in dlm_get_lock_resource()
996 dlm_mle_detach_hb_events(dlm, mle); in dlm_get_lock_resource()
997 dlm_put_mle(mle); in dlm_get_lock_resource()
999 dlm_put_mle_inuse(mle); in dlm_get_lock_resource()
1020 struct dlm_master_list_entry *mle, in dlm_wait_for_lock_mastery() argument
1041 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1054 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1055 m = mle->master; in dlm_wait_for_lock_mastery()
1056 map_changed = (memcmp(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1057 sizeof(mle->vote_map)) != 0); in dlm_wait_for_lock_mastery()
1058 voting_done = (memcmp(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1059 sizeof(mle->vote_map)) == 0); in dlm_wait_for_lock_mastery()
1066 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1067 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1074 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1099 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_wait_for_lock_mastery()
1104 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1115 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1120 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1121 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1122 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1139 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
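
The wait loop above (lines 1054-1074) drives mastery purely from three node bitmaps. As I read the fragments: node_map tracks nodes currently up, vote_map the nodes whose answers are still required, and response_map the nodes that have answered. A commented restatement of the two checks at lines 1056-1059:

	/* Hedged reading of the conditions above, not verbatim source. */
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);	/* a voter died or joined, restart mastery */
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);	/* every expected voter has replied */
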
1225 struct dlm_master_list_entry *mle, in dlm_restart_lock_mastery() argument
1236 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1238 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1249 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1250 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1254 int lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1258 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1264 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1289 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1290 mle->mleres = res; in dlm_restart_lock_mastery()
1297 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_restart_lock_mastery()
1298 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_restart_lock_mastery()
1300 memcpy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1301 sizeof(mle->node_map)); in dlm_restart_lock_mastery()
1303 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1304 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1324 struct dlm_master_list_entry *mle, int to) in dlm_do_master_request() argument
1326 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1333 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1335 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1336 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1369 spin_lock(&mle->spinlock); in dlm_do_master_request()
1372 set_bit(to, mle->response_map); in dlm_do_master_request()
1377 mle->master = to; in dlm_do_master_request()
1381 set_bit(to, mle->response_map); in dlm_do_master_request()
1385 set_bit(to, mle->response_map); in dlm_do_master_request()
1386 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1397 spin_unlock(&mle->spinlock); in dlm_do_master_request()
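
Lines 1369-1397 sketch the per-response bookkeeping in dlm_do_master_request(). My reconstruction of that switch, using the DLM_MASTER_RESP_* codes from the surrounding dlm headers (hedged, not verbatim source):

	/* Reconstruction of the handling around lines 1372-1386. */
	spin_lock(&mle->spinlock);
	switch (response) {
	case DLM_MASTER_RESP_YES:		/* node 'to' claims mastery */
		set_bit(to, mle->response_map);
		mle->master = to;
		break;
	case DLM_MASTER_RESP_NO:		/* 'to' is not master and not interested */
		set_bit(to, mle->response_map);
		break;
	case DLM_MASTER_RESP_MAYBE:		/* 'to' is also racing to master this lockres */
		set_bit(to, mle->response_map);
		set_bit(to, mle->maybe_map);
		break;
	}
	spin_unlock(&mle->spinlock);
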
1424 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; in dlm_master_request_handler() local
1475 if (mle) in dlm_master_request_handler()
1476 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1484 if (mle) in dlm_master_request_handler()
1485 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1499 if (mle) in dlm_master_request_handler()
1500 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1564 if (mle) in dlm_master_request_handler()
1565 kmem_cache_free(dlm_mle_cache, mle); in dlm_master_request_handler()
1580 if (!mle) { in dlm_master_request_handler()
1584 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_master_request_handler()
1585 if (!mle) { in dlm_master_request_handler()
1595 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); in dlm_master_request_handler()
1596 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1597 __dlm_insert_mle(dlm, mle); in dlm_master_request_handler()
1692 struct dlm_master_list_entry *mle = NULL; in dlm_do_assert_master() local
1723 if (dlm_find_mle(dlm, &mle, (char *)lockname, in dlm_do_assert_master()
1725 dlm_print_one_mle(mle); in dlm_do_assert_master()
1726 __dlm_put_mle(mle); in dlm_do_assert_master()
1780 struct dlm_master_list_entry *mle = NULL; in dlm_assert_master_handler() local
1809 if (!dlm_find_mle(dlm, &mle, name, namelen)) { in dlm_assert_master_handler()
1815 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_assert_master_handler()
1838 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1849 __dlm_put_mle(mle); in dlm_assert_master_handler()
1868 if (!mle) { in dlm_assert_master_handler()
1878 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1903 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1907 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1908 mle->master, namelen, name); in dlm_assert_master_handler()
1919 if (mle) { in dlm_assert_master_handler()
1924 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1925 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1931 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1939 mle->master = assert->node_idx; in dlm_assert_master_handler()
1940 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1941 wake_up(&mle->wq); in dlm_assert_master_handler()
1942 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1947 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1951 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1954 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1957 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1970 rr = kref_read(&mle->mle_refs); in dlm_assert_master_handler()
1971 if (mle->inuse > 0) { in dlm_assert_master_handler()
1986 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1987 dlm_print_one_mle(mle); in dlm_assert_master_handler()
1989 __dlm_unlink_mle(dlm, mle); in dlm_assert_master_handler()
1990 __dlm_mle_detach_hb_events(dlm, mle); in dlm_assert_master_handler()
1991 __dlm_put_mle(mle); in dlm_assert_master_handler()
1997 __dlm_put_mle(mle); in dlm_assert_master_handler()
2043 if (mle) in dlm_assert_master_handler()
2044 __dlm_put_mle(mle); in dlm_assert_master_handler()
2560 struct dlm_master_list_entry *mle = NULL; in dlm_migrate_lockres() local
2588 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_lockres()
2589 if (!mle) { in dlm_migrate_lockres()
2601 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2608 dlm_get_mle_inuse(mle); in dlm_migrate_lockres()
2643 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2644 dlm_put_mle(mle); in dlm_migrate_lockres()
2645 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2646 } else if (mle) { in dlm_migrate_lockres()
2647 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_lockres()
2648 mle = NULL; in dlm_migrate_lockres()
2674 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2675 dlm_put_mle(mle); in dlm_migrate_lockres()
2676 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2700 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2701 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2705 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2720 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2721 dlm_put_mle(mle); in dlm_migrate_lockres()
2722 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
2743 dlm_mle_detach_hb_events(dlm, mle); in dlm_migrate_lockres()
2744 dlm_put_mle_inuse(mle); in dlm_migrate_lockres()
3125 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; in dlm_migrate_request_handler() local
3138 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); in dlm_migrate_request_handler()
3140 if (!mle) { in dlm_migrate_request_handler()
3157 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3167 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3173 kmem_cache_free(dlm_mle_cache, mle); in dlm_migrate_request_handler()
3201 struct dlm_master_list_entry *mle, in dlm_add_migration_mle() argument
3260 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3261 mle->new_master = new_master; in dlm_add_migration_mle()
3264 mle->master = master; in dlm_add_migration_mle()
3266 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3267 __dlm_insert_mle(dlm, mle); in dlm_add_migration_mle()
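
For a migration mle (lines 3260-3267 above) the fields appear to mean: master is the node that currently owns the lock resource, new_master is the node it is moving to, and only the target is flagged as a mastery candidate. A commented restatement of the fragment, with the field roles as I read them:

	/* Hedged reading of the migration-mle setup, not verbatim source. */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;		/* node the lockres is migrating to */
	mle->master = master;			/* node that owns it right now */
	set_bit(new_master, mle->maybe_map);	/* only the target may become master */
	__dlm_insert_mle(dlm, mle);		/* hash it so assert/migrate handlers can find it */
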
3276 struct dlm_master_list_entry *mle) in dlm_reset_mleres_owner() argument
3281 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3282 mle->mnamehash); in dlm_reset_mleres_owner()
3294 __dlm_mle_detach_hb_events(dlm, mle); in dlm_reset_mleres_owner()
3298 __dlm_put_mle(mle); in dlm_reset_mleres_owner()
3306 struct dlm_master_list_entry *mle) in dlm_clean_migration_mle() argument
3308 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_migration_mle()
3310 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3311 __dlm_unlink_mle(dlm, mle); in dlm_clean_migration_mle()
3312 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3313 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3315 wake_up(&mle->wq); in dlm_clean_migration_mle()
3319 struct dlm_master_list_entry *mle, u8 dead_node) in dlm_clean_block_mle() argument
3323 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3325 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3326 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_clean_block_mle()
3330 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3337 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3338 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3339 wake_up(&mle->wq); in dlm_clean_block_mle()
3342 __dlm_mle_detach_hb_events(dlm, mle); in dlm_clean_block_mle()
3343 __dlm_put_mle(mle); in dlm_clean_block_mle()
3349 struct dlm_master_list_entry *mle; in dlm_clean_master_list() local
3363 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3364 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3365 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3366 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3371 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3377 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3378 dlm_clean_block_mle(dlm, mle, dead_node); in dlm_clean_master_list()
3393 if (mle->master != dead_node && in dlm_clean_master_list()
3394 mle->new_master != dead_node) in dlm_clean_master_list()
3397 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3402 mle->master); in dlm_clean_master_list()
3408 dlm_clean_migration_mle(dlm, mle); in dlm_clean_master_list()
3411 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3412 mle->new_master); in dlm_clean_master_list()
3419 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3425 __dlm_put_mle(mle); in dlm_clean_master_list()
3552 struct dlm_master_list_entry *mle; in dlm_force_free_mles() local
3569 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_force_free_mles()
3570 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3571 mlog(ML_ERROR, "bad mle: %p\n", mle); in dlm_force_free_mles()
3572 dlm_print_one_mle(mle); in dlm_force_free_mles()
3574 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3575 wake_up(&mle->wq); in dlm_force_free_mles()
3577 __dlm_unlink_mle(dlm, mle); in dlm_force_free_mles()
3578 __dlm_mle_detach_hb_events(dlm, mle); in dlm_force_free_mles()
3579 __dlm_put_mle(mle); in dlm_force_free_mles()