Lines Matching refs:mg
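
All matches below are against the mount_group bookkeeping in the CIFS DFS referral cache: one group per mount, looked up by UUID on a global list, reference-counted with a kref, and holding the SMB sessions used to reach the DFS referral servers. A minimal sketch of the data these matches manipulate, reconstructed only from the fields they touch; the field order, the sessions[] bound, and the lock guarding the global list are assumptions for illustration, not taken from the listing:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

struct cifs_ses;				/* SMB session, defined in the CIFS headers */

#define MG_MAX_SESSIONS 16			/* hypothetical bound; the real array size is not visible here */

struct mount_group {
	struct list_head list;			/* link on the global mount_group_list */
	uuid_t id;				/* per-mount UUID used for lookups */
	struct cifs_ses *sessions[MG_MAX_SESSIONS];	/* referral-server sessions, put on release */
	int num_sessions;			/* valid entries in sessions[] */
	spinlock_t lock;			/* protects sessions[] and num_sessions */
	struct list_head refresh_list;		/* temporary link used by refresh_cache_worker() */
	struct kref refcount;			/* dropped via mount_group_release() */
};

static LIST_HEAD(mount_group_list);		/* the global list the matches iterate */
static DEFINE_MUTEX(mount_group_list_lock);	/* assumed: some lock must guard that list */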

107 static void __mount_group_release(struct mount_group *mg)  in __mount_group_release()  argument
111 for (i = 0; i < mg->num_sessions; i++) in __mount_group_release()
112 cifs_put_smb_ses(mg->sessions[i]); in __mount_group_release()
113 kfree(mg); in __mount_group_release()
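
Taken together, the __mount_group_release() matches are nearly the whole helper; only the opening brace and the loop counter are elided. A hedged reconstruction, assuming nothing else sits between the matched lines:

static void __mount_group_release(struct mount_group *mg)
{
	int i;

	/* Drop the reference the group holds on each referral-server session. */
	for (i = 0; i < mg->num_sessions; i++)
		cifs_put_smb_ses(mg->sessions[i]);
	kfree(mg);
}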
118 struct mount_group *mg = container_of(kref, struct mount_group, refcount); in mount_group_release() local
121 list_del(&mg->list); in mount_group_release()
123 __mount_group_release(mg); in mount_group_release()
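
mount_group_release() is the kref release callback named in the kref_put() calls further down. The matches skip the lines that take and drop the lock around the global list; the sketch assumes the mount_group_list_lock mutex declared above:

static void mount_group_release(struct kref *kref)
{
	struct mount_group *mg = container_of(kref, struct mount_group, refcount);

	/* Unlink from the global list before tearing the group down. */
	mutex_lock(&mount_group_list_lock);		/* assumed lock */
	list_del(&mg->list);
	mutex_unlock(&mount_group_list_lock);
	__mount_group_release(mg);
}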
128 struct mount_group *mg; in find_mount_group_locked() local
130 list_for_each_entry(mg, &mount_group_list, list) { in find_mount_group_locked()
131 if (uuid_equal(&mg->id, id)) in find_mount_group_locked()
132 return mg; in find_mount_group_locked()
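
find_mount_group_locked() only matches on the found case; the not-found return carries no mg reference and is therefore elided. Since every caller checks the result with IS_ERR(), the helper presumably returns an ERR_PTR() on a miss; a sketch assuming -ENOENT:

/* Caller must hold the lock that guards mount_group_list. */
static struct mount_group *find_mount_group_locked(const uuid_t *id)
{
	struct mount_group *mg;

	list_for_each_entry(mg, &mount_group_list, list) {
		if (uuid_equal(&mg->id, id))
			return mg;
	}
	return ERR_PTR(-ENOENT);	/* assumed error code; only the IS_ERR() checks are visible */
}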
139 struct mount_group *mg; in __get_mount_group_locked() local
141 mg = find_mount_group_locked(id); in __get_mount_group_locked()
142 if (!IS_ERR(mg)) in __get_mount_group_locked()
143 return mg; in __get_mount_group_locked()
145 mg = kmalloc(sizeof(*mg), GFP_KERNEL); in __get_mount_group_locked()
146 if (!mg) in __get_mount_group_locked()
148 kref_init(&mg->refcount); in __get_mount_group_locked()
149 uuid_copy(&mg->id, id); in __get_mount_group_locked()
150 mg->num_sessions = 0; in __get_mount_group_locked()
151 spin_lock_init(&mg->lock); in __get_mount_group_locked()
152 list_add(&mg->list, &mount_group_list); in __get_mount_group_locked()
153 return mg; in __get_mount_group_locked()
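
__get_mount_group_locked() is a find-or-create helper: reuse the existing group for this UUID, otherwise allocate, initialise, and publish a new one on the global list with an initial reference. The elided line after the failed kmalloc() presumably returns ERR_PTR(-ENOMEM); that value is an assumption:

static struct mount_group *__get_mount_group_locked(const uuid_t *id)
{
	struct mount_group *mg;

	mg = find_mount_group_locked(id);
	if (!IS_ERR(mg))
		return mg;

	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
	if (!mg)
		return ERR_PTR(-ENOMEM);	/* assumed; the listing only shows the NULL check */
	kref_init(&mg->refcount);		/* the new group starts with one reference */
	uuid_copy(&mg->id, id);
	mg->num_sessions = 0;
	spin_lock_init(&mg->lock);
	list_add(&mg->list, &mount_group_list);
	return mg;
}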
158 struct mount_group *mg; in get_mount_group() local
161 mg = __get_mount_group_locked(id); in get_mount_group()
162 if (!IS_ERR(mg)) in get_mount_group()
163 kref_get(&mg->refcount); in get_mount_group()
166 return mg; in get_mount_group()
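
get_mount_group() wraps the helper above with the list lock and takes an extra reference for the caller, so the group cannot disappear while a session is being added. The locking lines are elided from the matches; the sketch again assumes mount_group_list_lock:

static struct mount_group *get_mount_group(const uuid_t *id)
{
	struct mount_group *mg;

	mutex_lock(&mount_group_list_lock);	/* assumed lock */
	mg = __get_mount_group_locked(id);
	if (!IS_ERR(mg))
		kref_get(&mg->refcount);	/* caller's reference, dropped later with kref_put() */
	mutex_unlock(&mount_group_list_lock);

	return mg;
}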
171 struct mount_group *mg, *tmp_mg; in free_mount_group_list() local
173 list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) { in free_mount_group_list()
174 list_del_init(&mg->list); in free_mount_group_list()
175 __mount_group_release(mg); in free_mount_group_list()
1192 struct mount_group *mg; in dfs_cache_add_refsrv_session() local
1197 mg = get_mount_group(mount_id); in dfs_cache_add_refsrv_session()
1198 if (WARN_ON_ONCE(IS_ERR(mg))) in dfs_cache_add_refsrv_session()
1201 spin_lock(&mg->lock); in dfs_cache_add_refsrv_session()
1202 if (mg->num_sessions < ARRAY_SIZE(mg->sessions)) in dfs_cache_add_refsrv_session()
1203 mg->sessions[mg->num_sessions++] = ses; in dfs_cache_add_refsrv_session()
1204 spin_unlock(&mg->lock); in dfs_cache_add_refsrv_session()
1205 kref_put(&mg->refcount, mount_group_release); in dfs_cache_add_refsrv_session()
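
dfs_cache_add_refsrv_session() is the producer side: look up (or create) the group for the mount UUID, append the referral-server session under mg->lock if there is room, and immediately drop the reference taken by get_mount_group(), leaving only the list's own reference. A hedged reconstruction; the signature is inferred from the arguments used, and any early argument checks are omitted:

void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
{
	struct mount_group *mg;

	mg = get_mount_group(mount_id);
	if (WARN_ON_ONCE(IS_ERR(mg)))
		return;

	spin_lock(&mg->lock);
	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
		mg->sessions[mg->num_sessions++] = ses;	/* silently ignored when the array is full */
	spin_unlock(&mg->lock);
	kref_put(&mg->refcount, mount_group_release);	/* balances get_mount_group() */
}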
1217 struct mount_group *mg; in dfs_cache_put_refsrv_sessions() local
1223 mg = find_mount_group_locked(mount_id); in dfs_cache_put_refsrv_sessions()
1224 if (IS_ERR(mg)) { in dfs_cache_put_refsrv_sessions()
1229 kref_put(&mg->refcount, mount_group_release); in dfs_cache_put_refsrv_sessions()
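
dfs_cache_put_refsrv_sessions() is the matching teardown at unmount: find the group by mount UUID under the list lock, then drop the mount's long-lived reference; when that was the last reference, mount_group_release() unlinks the group and puts every session. In the sketch the kref_put() happens only after the list lock is released, since the assumed release callback re-takes that lock to unlink the group:

void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
{
	struct mount_group *mg;

	mutex_lock(&mount_group_list_lock);		/* assumed lock */
	mg = find_mount_group_locked(mount_id);
	if (IS_ERR(mg)) {
		mutex_unlock(&mount_group_list_lock);
		return;
	}
	mutex_unlock(&mount_group_list_lock);
	kref_put(&mg->refcount, mount_group_release);	/* drop the mount's reference */
}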
1464 struct mount_group *mg; in dfs_cache_remount_fs() local
1485 mg = find_mount_group_locked(&cifs_sb->dfs_mount_id); in dfs_cache_remount_fs()
1486 if (IS_ERR(mg)) { in dfs_cache_remount_fs()
1489 return PTR_ERR(mg); in dfs_cache_remount_fs()
1491 kref_get(&mg->refcount); in dfs_cache_remount_fs()
1494 spin_lock(&mg->lock); in dfs_cache_remount_fs()
1495 memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0])); in dfs_cache_remount_fs()
1496 spin_unlock(&mg->lock); in dfs_cache_remount_fs()
1510 kref_put(&mg->refcount, mount_group_release); in dfs_cache_remount_fs()
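
dfs_cache_remount_fs() shows the snapshot pattern used whenever the sessions are needed outside mg->lock: pin the group with kref_get() while the list lock is still held, copy sessions[] into a local array under the spinlock, drop the spinlock before doing any slow work with the copies, and put the group when done. A trimmed sketch of just that pattern; struct cifs_sb_info comes from the CIFS headers, and the local array bound, the lock name, and the elided remount work are assumptions:

int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *sessions[MG_MAX_SESSIONS + 1] = {NULL};	/* zero-filled local snapshot */
	struct mount_group *mg;

	mutex_lock(&mount_group_list_lock);			/* assumed lock */
	mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
	if (IS_ERR(mg)) {
		mutex_unlock(&mount_group_list_lock);
		return PTR_ERR(mg);
	}
	kref_get(&mg->refcount);				/* pin the group past the list lock */
	mutex_unlock(&mount_group_list_lock);

	spin_lock(&mg->lock);
	memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
	spin_unlock(&mg->lock);

	/* ... re-resolve the DFS paths for this mount using the snapshotted sessions ... */

	kref_put(&mg->refcount, mount_group_release);
	return 0;
}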
1642 struct mount_group *mg, *tmp_mg; in refresh_cache_worker() local
1651 list_for_each_entry(mg, &mount_group_list, list) { in refresh_cache_worker()
1652 kref_get(&mg->refcount); in refresh_cache_worker()
1653 list_add(&mg->refresh_list, &mglist); in refresh_cache_worker()
1658 list_for_each_entry(mg, &mglist, refresh_list) { in refresh_cache_worker()
1662 spin_lock(&mg->lock); in refresh_cache_worker()
1663 if (i + mg->num_sessions > max_sessions) in refresh_cache_worker()
1666 count = mg->num_sessions; in refresh_cache_worker()
1667 memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0])); in refresh_cache_worker()
1668 spin_unlock(&mg->lock); in refresh_cache_worker()
1678 list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) { in refresh_cache_worker()
1679 list_del_init(&mg->refresh_list); in refresh_cache_worker()
1680 kref_put(&mg->refcount, mount_group_release); in refresh_cache_worker()
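
refresh_cache_worker() applies the same idea across every mount, in two phases. Phase one walks mount_group_list under the list lock and pins each group onto a private mglist through its refresh_list link, so the list lock is held only briefly. Phase two walks the private list, snapshots each group's sessions into one bounded local array under mg->lock (the check at line 1663 clamps against max_sessions), does the refresh with no locks held, and finally unpins every group. A condensed sketch of the two loops; the array bound, the lock name, and the refresh step itself are assumptions:

static void refresh_cache_worker(struct work_struct *work)
{
	struct cifs_ses *sessions[MG_MAX_SESSIONS + 1] = {NULL};
	int max_sessions = ARRAY_SIZE(sessions) - 1;	/* keep a trailing NULL */
	struct mount_group *mg, *tmp_mg;
	LIST_HEAD(mglist);
	int i = 0, count;

	/* Phase 1: pin every mount group on a private list. */
	mutex_lock(&mount_group_list_lock);		/* assumed lock */
	list_for_each_entry(mg, &mount_group_list, list) {
		kref_get(&mg->refcount);
		list_add(&mg->refresh_list, &mglist);
	}
	mutex_unlock(&mount_group_list_lock);

	/* Phase 2: snapshot each pinned group's sessions into the local array. */
	list_for_each_entry(mg, &mglist, refresh_list) {
		spin_lock(&mg->lock);
		if (i + mg->num_sessions > max_sessions)
			count = max_sessions - i;	/* clamp, as the check at line 1663 implies */
		else
			count = mg->num_sessions;
		memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
		spin_unlock(&mg->lock);
		i += count;
	}

	/* ... refresh cached referrals and active mounts using the snapshotted sessions ... */

	/* Unpin; dropping the last reference frees a group via mount_group_release(). */
	list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
		list_del_init(&mg->refresh_list);
		kref_put(&mg->refcount, mount_group_release);
	}
}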