Lines matching refs:vnode in fs/afs/flock.c (Linux kernel AFS file locking)

16 static void afs_next_locker(struct afs_vnode *vnode, int error);
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state) in afs_set_lock_state() argument
27 _debug("STATE %u -> %u", vnode->lock_state, state); in afs_set_lock_state()
28 vnode->lock_state = state; in afs_set_lock_state()
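
The hits above are a small helper that every lock-state transition in this file goes through: log the old and new state, then store it. A minimal user-space model of the same pattern, for orientation; the enum constants are the states that appear elsewhere in this listing, but their order, the struct, and main() are assumptions made for the sketch.

#include <stdio.h>

/* Illustrative model only: the real enum lives in the AFS headers; the
 * constants here are limited to states visible in this listing, in an
 * assumed order. */
enum afs_lock_state {
	AFS_VNODE_LOCK_NONE,
	AFS_VNODE_LOCK_WAITING_FOR_CB,
	AFS_VNODE_LOCK_SETTING,
	AFS_VNODE_LOCK_GRANTED,
	AFS_VNODE_LOCK_EXTENDING,
	AFS_VNODE_LOCK_NEED_UNLOCK,
	AFS_VNODE_LOCK_UNLOCKING,
	AFS_VNODE_LOCK_DELETED,
};

struct vnode_model {
	enum afs_lock_state lock_state;
};

/* Mirrors the shape of afs_set_lock_state(): trace the transition,
 * then record the new state. */
static void set_lock_state(struct vnode_model *v, enum afs_lock_state state)
{
	printf("STATE %u -> %u\n", (unsigned)v->lock_state, (unsigned)state);
	v->lock_state = state;
}

int main(void)
{
	struct vnode_model v = { .lock_state = AFS_VNODE_LOCK_NONE };

	set_lock_state(&v, AFS_VNODE_LOCK_SETTING);
	set_lock_state(&v, AFS_VNODE_LOCK_GRANTED);
	return 0;
}
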
36 void afs_lock_may_be_available(struct afs_vnode *vnode) in afs_lock_may_be_available() argument
38 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_may_be_available()
40 spin_lock(&vnode->lock); in afs_lock_may_be_available()
41 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) in afs_lock_may_be_available()
42 afs_next_locker(vnode, 0); in afs_lock_may_be_available()
43 trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0); in afs_lock_may_be_available()
44 spin_unlock(&vnode->lock); in afs_lock_may_be_available()
51 static void afs_schedule_lock_extension(struct afs_vnode *vnode) in afs_schedule_lock_extension() argument
56 expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2); in afs_schedule_lock_extension()
64 queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j); in afs_schedule_lock_extension()
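
afs_schedule_lock_extension() arms the lock_work timer at half the server lock's lifetime past locked_at (line 56). A stand-alone illustration of that arithmetic; it assumes AFS_LOCKWAIT is the lock lifetime in seconds (5 minutes in mainline), and the helper name and millisecond plumbing are invented for the sketch.

#include <stdio.h>
#include <time.h>

/* Assumption: AFS_LOCKWAIT is the server-side lock lifetime in seconds.
 * Only the "expires at locked_at + AFS_LOCKWAIT * 1000 / 2" arithmetic
 * is taken from the hit on line 56. */
#define AFS_LOCKWAIT (5 * 60)

/* How many milliseconds to wait before asking the server to extend a
 * lock obtained at locked_at_ms: schedule the work at half the lock's
 * lifetime, or immediately if that point has already passed. */
static long long extension_delay_ms(long long locked_at_ms, long long now_ms)
{
	long long expires_at_ms = locked_at_ms + AFS_LOCKWAIT * 1000 / 2;
	long long delay = expires_at_ms - now_ms;

	return delay > 0 ? delay : 0;	/* already overdue: extend now */
}

int main(void)
{
	long long now = (long long)time(NULL) * 1000;

	printf("fresh lock: extend in %lld ms\n",
	       extension_delay_ms(now, now));
	printf("3-minute-old lock: extend in %lld ms\n",
	       extension_delay_ms(now - 180 * 1000LL, now));
	return 0;
}

Running this prints 150000 ms for a fresh lock and 0 ms for one past the halfway point, which is the behaviour the delayed-work scheduling above is after.
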
74 struct afs_vnode *vnode = op->file[0].vnode; in afs_lock_op_done() local
77 spin_lock(&vnode->lock); in afs_lock_op_done()
78 trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); in afs_lock_op_done()
79 vnode->locked_at = call->issue_time; in afs_lock_op_done()
80 afs_schedule_lock_extension(vnode); in afs_lock_op_done()
81 spin_unlock(&vnode->lock); in afs_lock_op_done()
90 static void afs_grant_locks(struct afs_vnode *vnode) in afs_grant_locks() argument
93 bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE); in afs_grant_locks()
95 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_grant_locks()
99 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); in afs_grant_locks()
101 trace_afs_flock_op(vnode, p, afs_flock_op_grant); in afs_grant_locks()
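
afs_grant_locks() walks pending_locks and moves compatible waiters onto granted_locks once the server lock is held. A simplified model of that pass, using arrays instead of the kernel's file_lock lists; the read/write compatibility rule is an assumption, since the hits only show the exclusivity test on line 93 and the list move on line 99.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: waiters are plain array entries and "granted" is
 * just a flag instead of a list move plus wake-up. */
enum req_type { READ_REQ, WRITE_REQ };

struct waiter {
	enum req_type type;
	bool granted;
};

/* Once the server lock is held, hand it out locally: a shared (read)
 * server lock can only satisfy read waiters, while an exclusive (write)
 * server lock satisfies everything queued on this client (assumed rule). */
static void grant_locks(struct waiter *pending, int n, bool server_lock_is_write)
{
	for (int i = 0; i < n; i++) {
		if (!server_lock_is_write && pending[i].type == WRITE_REQ)
			continue;	/* needs its own exclusive server lock */
		pending[i].granted = true;
	}
}

int main(void)
{
	struct waiter w[] = {
		{ READ_REQ, false }, { WRITE_REQ, false }, { READ_REQ, false },
	};

	grant_locks(w, 3, false);	/* server lock held is a read lock */
	for (int i = 0; i < 3; i++)
		printf("waiter %d: %s\n", i,
		       w[i].granted ? "granted" : "still pending");
	return 0;
}
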
111 static void afs_next_locker(struct afs_vnode *vnode, int error) in afs_next_locker() argument
114 struct key *key = vnode->lock_key; in afs_next_locker()
119 if (vnode->lock_type == AFS_LOCK_WRITE) in afs_next_locker()
122 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_next_locker()
138 vnode->lock_key = NULL; in afs_next_locker()
142 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_next_locker()
144 trace_afs_flock_op(vnode, next, afs_flock_op_wake); in afs_next_locker()
147 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE); in afs_next_locker()
148 trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0); in afs_next_locker()
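
afs_next_locker() hands the vnode over when the current holder finishes or fails: one pending waiter is woken to retry the RPC and the state moves to LOCK_SETTING, or the state drops to LOCK_NONE when nobody is queued (lines 142-148). A sketch of the selection; the preference for a write waiter is an assumption about the policy, not something visible in the hits.

#include <stdio.h>

/* Illustrative model of the hand-off: pick one queued request to wake,
 * or report an empty queue (which corresponds to dropping back to
 * AFS_VNODE_LOCK_NONE). */
enum req { REQ_READ, REQ_WRITE };

/* Return the index of the waiter to wake, or -1 if the queue is empty. */
static int pick_next_locker(const enum req *pending, int n)
{
	int first = -1;

	for (int i = 0; i < n; i++) {
		if (pending[i] == REQ_WRITE)
			return i;	/* assumed: writers get preference */
		if (first < 0)
			first = i;
	}
	return first;
}

int main(void)
{
	enum req q1[] = { REQ_READ, REQ_WRITE, REQ_READ };
	enum req q2[] = { REQ_READ, REQ_READ };

	printf("wake waiter %d\n", pick_next_locker(q1, 3));	/* 1 */
	printf("wake waiter %d\n", pick_next_locker(q2, 2));	/* 0 */
	printf("wake waiter %d\n", pick_next_locker(NULL, 0));	/* -1 */
	return 0;
}
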
158 static void afs_kill_lockers_enoent(struct afs_vnode *vnode) in afs_kill_lockers_enoent() argument
162 afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED); in afs_kill_lockers_enoent()
164 while (!list_empty(&vnode->pending_locks)) { in afs_kill_lockers_enoent()
165 p = list_entry(vnode->pending_locks.next, in afs_kill_lockers_enoent()
172 key_put(vnode->lock_key); in afs_kill_lockers_enoent()
173 vnode->lock_key = NULL; in afs_kill_lockers_enoent()
192 static int afs_set_lock(struct afs_vnode *vnode, struct key *key, in afs_set_lock() argument
198 vnode->volume->name, in afs_set_lock()
199 vnode->fid.vid, in afs_set_lock()
200 vnode->fid.vnode, in afs_set_lock()
201 vnode->fid.unique, in afs_set_lock()
204 op = afs_alloc_operation(key, vnode->volume); in afs_set_lock()
208 afs_op_set_vnode(op, 0, vnode); in afs_set_lock()
224 static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) in afs_extend_lock() argument
229 vnode->volume->name, in afs_extend_lock()
230 vnode->fid.vid, in afs_extend_lock()
231 vnode->fid.vnode, in afs_extend_lock()
232 vnode->fid.unique, in afs_extend_lock()
235 op = afs_alloc_operation(key, vnode->volume); in afs_extend_lock()
239 afs_op_set_vnode(op, 0, vnode); in afs_extend_lock()
255 static int afs_release_lock(struct afs_vnode *vnode, struct key *key) in afs_release_lock() argument
260 vnode->volume->name, in afs_release_lock()
261 vnode->fid.vid, in afs_release_lock()
262 vnode->fid.vnode, in afs_release_lock()
263 vnode->fid.unique, in afs_release_lock()
266 op = afs_alloc_operation(key, vnode->volume); in afs_release_lock()
270 afs_op_set_vnode(op, 0, vnode); in afs_release_lock()
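
afs_set_lock(), afs_extend_lock() and afs_release_lock() share one shape: log the volume name and FID, allocate an operation against the vnode's volume with the caller's key, attach the vnode, and run it synchronously. A user-space mock of that shared pattern; every type and helper below is invented for the sketch, and only the three operation kinds and the allocate/attach/run shape come from the hits.

#include <stdio.h>

/* Invented stand-ins for the kernel types used by these wrappers. */
enum lock_op { OP_SET_LOCK, OP_EXTEND_LOCK, OP_RELEASE_LOCK };

struct fid { unsigned long long vid, vnode; unsigned unique; };
struct vnode_model { const char *volume_name; struct fid fid; };

/* Stand-in for allocate-operation + set-vnode + run-synchronously:
 * just report what would be sent to the file server. */
static int run_operation(enum lock_op kind, const struct vnode_model *v,
			 const char *key)
{
	printf("op %d on %s{%llx:%llu.%u} with key %s\n",
	       kind, v->volume_name, v->fid.vid, v->fid.vnode,
	       v->fid.unique, key);
	return 0;
}

static int set_lock(const struct vnode_model *v, const char *key)
{
	return run_operation(OP_SET_LOCK, v, key);
}

static int extend_lock(const struct vnode_model *v, const char *key)
{
	return run_operation(OP_EXTEND_LOCK, v, key);
}

static int release_lock(const struct vnode_model *v, const char *key)
{
	return run_operation(OP_RELEASE_LOCK, v, key);
}

int main(void)
{
	struct vnode_model v = { "example.vol", { 0x1234, 5678, 1 } };

	set_lock(&v, "user-key");
	extend_lock(&v, "user-key");
	release_lock(&v, "user-key");
	return 0;
}
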
284 struct afs_vnode *vnode = in afs_lock_work() local
289 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_work()
291 spin_lock(&vnode->lock); in afs_lock_work()
294 _debug("wstate %u for %p", vnode->lock_state, vnode); in afs_lock_work()
295 switch (vnode->lock_state) { in afs_lock_work()
297 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING); in afs_lock_work()
298 trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0); in afs_lock_work()
299 spin_unlock(&vnode->lock); in afs_lock_work()
303 ret = afs_release_lock(vnode, vnode->lock_key); in afs_lock_work()
304 if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) { in afs_lock_work()
305 trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail, in afs_lock_work()
309 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
312 spin_lock(&vnode->lock); in afs_lock_work()
314 afs_kill_lockers_enoent(vnode); in afs_lock_work()
316 afs_next_locker(vnode, 0); in afs_lock_work()
317 spin_unlock(&vnode->lock); in afs_lock_work()
326 ASSERT(!list_empty(&vnode->granted_locks)); in afs_lock_work()
328 key = key_get(vnode->lock_key); in afs_lock_work()
329 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING); in afs_lock_work()
330 trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0); in afs_lock_work()
331 spin_unlock(&vnode->lock); in afs_lock_work()
333 ret = afs_extend_lock(vnode, key); /* RPC */ in afs_lock_work()
337 trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail, in afs_lock_work()
340 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
343 spin_lock(&vnode->lock); in afs_lock_work()
346 afs_kill_lockers_enoent(vnode); in afs_lock_work()
347 spin_unlock(&vnode->lock); in afs_lock_work()
351 if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING) in afs_lock_work()
353 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); in afs_lock_work()
356 queue_delayed_work(afs_lock_manager, &vnode->lock_work, in afs_lock_work()
358 spin_unlock(&vnode->lock); in afs_lock_work()
370 afs_next_locker(vnode, 0); in afs_lock_work()
371 spin_unlock(&vnode->lock); in afs_lock_work()
375 afs_kill_lockers_enoent(vnode); in afs_lock_work()
376 spin_unlock(&vnode->lock); in afs_lock_work()
381 spin_unlock(&vnode->lock); in afs_lock_work()
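
afs_lock_work() is the delayed work item behind the queue_delayed_work() calls in this listing: it inspects lock_state under vnode->lock and decides whether to release the server lock, extend it, or move the queue along. A toy dispatcher along those lines; the case labels are inferred from the states and actions visible in the hits (in particular, mapping WAITING_FOR_CB to "retry" is an assumption), and the helpers only print.

#include <stdio.h>

enum work_state {
	STATE_NEED_UNLOCK,	/* last local holder has gone away */
	STATE_GRANTED,		/* held lock is at half its lifetime */
	STATE_WAITING_FOR_CB,	/* assumed: waited long enough, retry */
};

static void release_server_lock(void) { puts("release lock on server"); }
static void extend_server_lock(void)  { puts("extend lock on server"); }
static void retry_next_locker(void)   { puts("wake next waiter to retry"); }

/* Toy version of the state dispatch in the work function. */
static void lock_work(enum work_state s)
{
	switch (s) {
	case STATE_NEED_UNLOCK:
		release_server_lock();
		break;
	case STATE_GRANTED:
		extend_server_lock();
		break;
	case STATE_WAITING_FOR_CB:
		retry_next_locker();
		break;
	}
}

int main(void)
{
	lock_work(STATE_NEED_UNLOCK);
	lock_work(STATE_GRANTED);
	lock_work(STATE_WAITING_FOR_CB);
	return 0;
}
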
393 static void afs_defer_unlock(struct afs_vnode *vnode) in afs_defer_unlock() argument
395 _enter("%u", vnode->lock_state); in afs_defer_unlock()
397 if (list_empty(&vnode->granted_locks) && in afs_defer_unlock()
398 (vnode->lock_state == AFS_VNODE_LOCK_GRANTED || in afs_defer_unlock()
399 vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) { in afs_defer_unlock()
400 cancel_delayed_work(&vnode->lock_work); in afs_defer_unlock()
402 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK); in afs_defer_unlock()
403 trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0); in afs_defer_unlock()
404 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); in afs_defer_unlock()
412 static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key, in afs_do_setlk_check() argument
421 ret = afs_validate(vnode, key); in afs_do_setlk_check()
428 ret = afs_check_permit(vnode, key, &access); in afs_do_setlk_check()
455 struct afs_vnode *vnode = AFS_FS_I(inode); in afs_do_setlk() local
466 vnode->fid.vid, vnode->fid.vnode, in afs_do_setlk()
478 ret = afs_do_setlk_check(vnode, key, mode, type); in afs_do_setlk()
482 trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock); in afs_do_setlk()
498 spin_lock(&vnode->lock); in afs_do_setlk()
499 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); in afs_do_setlk()
502 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_setlk()
509 _debug("try %u", vnode->lock_state); in afs_do_setlk()
510 if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) { in afs_do_setlk()
513 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
518 if (vnode->lock_type == AFS_LOCK_WRITE) { in afs_do_setlk()
520 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
526 if (vnode->lock_state == AFS_VNODE_LOCK_NONE && in afs_do_setlk()
530 if (vnode->status.lock_count == -1) in afs_do_setlk()
533 if (vnode->status.lock_count != 0) in afs_do_setlk()
538 if (vnode->lock_state != AFS_VNODE_LOCK_NONE) in afs_do_setlk()
550 trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0); in afs_do_setlk()
551 vnode->lock_key = key_get(key); in afs_do_setlk()
552 vnode->lock_type = type; in afs_do_setlk()
553 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_do_setlk()
554 spin_unlock(&vnode->lock); in afs_do_setlk()
556 ret = afs_set_lock(vnode, key, type); /* RPC */ in afs_do_setlk()
558 spin_lock(&vnode->lock); in afs_do_setlk()
566 trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret); in afs_do_setlk()
568 afs_next_locker(vnode, ret); in afs_do_setlk()
573 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); in afs_do_setlk()
575 afs_kill_lockers_enoent(vnode); in afs_do_setlk()
580 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); in afs_do_setlk()
582 afs_next_locker(vnode, 0); in afs_do_setlk()
590 ASSERT(list_empty(&vnode->granted_locks)); in afs_do_setlk()
591 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); in afs_do_setlk()
595 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); in afs_do_setlk()
596 trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type); in afs_do_setlk()
597 afs_grant_locks(vnode); in afs_do_setlk()
602 spin_unlock(&vnode->lock); in afs_do_setlk()
609 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0); in afs_do_setlk()
611 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret); in afs_do_setlk()
619 afs_validate(vnode, key); in afs_do_setlk()
626 afs_next_locker(vnode, 0); in afs_do_setlk()
631 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB); in afs_do_setlk()
632 trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret); in afs_do_setlk()
633 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5); in afs_do_setlk()
641 spin_unlock(&vnode->lock); in afs_do_setlk()
643 trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0); in afs_do_setlk()
646 trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret); in afs_do_setlk()
649 spin_lock(&vnode->lock); in afs_do_setlk()
661 ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB); in afs_do_setlk()
662 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_do_setlk()
672 spin_unlock(&vnode->lock); in afs_do_setlk()
688 spin_lock(&vnode->lock); in afs_do_setlk()
690 afs_defer_unlock(vnode); in afs_do_setlk()
693 spin_unlock(&vnode->lock); in afs_do_setlk()
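
afs_do_setlk() queues the request on pending_locks and then chooses between failing outright for a deleted file, granting the request off the lock already held from the server, issuing a new SetLock RPC, or leaving the request to wait. A condensed decision function in that spirit; the compatibility conditions are a simplification of what the hits show, not a transcription of the kernel logic.

#include <stdio.h>

enum vstate { V_NONE, V_SETTING, V_GRANTED, V_DELETED };
enum server_lock { SRV_NONE, SRV_READ, SRV_WRITE };
enum action { A_FAIL_DELETED, A_GRANT_LOCALLY, A_DO_RPC, A_WAIT };

static enum action setlk_decision(enum vstate state, enum server_lock held,
				  int want_write)
{
	if (state == V_DELETED)
		return A_FAIL_DELETED;		/* file no longer exists */

	if (state == V_GRANTED) {
		/* Piggy-back on the lock already held from the server:
		 * a write lock covers further requests of either kind,
		 * a read lock only covers further read requests. */
		if (held == SRV_WRITE || (held == SRV_READ && !want_write))
			return A_GRANT_LOCALLY;
		return A_WAIT;			/* incompatible: stay queued */
	}

	if (state == V_NONE)
		return A_DO_RPC;		/* become the setter */

	return A_WAIT;		/* e.g. V_SETTING: someone else is mid-RPC */
}

int main(void)
{
	printf("%d\n", setlk_decision(V_GRANTED, SRV_READ, 0));	/* grant locally */
	printf("%d\n", setlk_decision(V_GRANTED, SRV_READ, 1));	/* wait */
	printf("%d\n", setlk_decision(V_NONE, SRV_NONE, 1));	/* do the RPC */
	printf("%d\n", setlk_decision(V_SETTING, SRV_NONE, 0));	/* wait */
	return 0;
}
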
704 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); in afs_do_unlk() local
707 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); in afs_do_unlk()
709 trace_afs_flock_op(vnode, fl, afs_flock_op_unlock); in afs_do_unlk()
715 _leave(" = %d [%u]", ret, vnode->lock_state); in afs_do_unlk()
724 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); in afs_do_getlk() local
730 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_getlk()
739 ret = afs_fetch_status(vnode, key, false, NULL); in afs_do_getlk()
743 lock_count = READ_ONCE(vnode->status.lock_count); in afs_do_getlk()
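
afs_do_getlk() answers F_GETLK from status.lock_count rather than taking a lock itself. The sketch below uses the usual AFS convention, assumed here rather than shown in the hits: a negative count means the file is write-locked on the server, a positive count is the number of read locks, and zero means unlocked.

#include <stdio.h>

/* Interpret status.lock_count as tested in the hits above and in the
 * trylock checks of afs_do_setlk() (assumed convention). */
static const char *describe_lock_count(int lock_count)
{
	if (lock_count < 0)
		return "write-locked on the server";
	if (lock_count > 0)
		return "read-locked on the server";
	return "not locked";
}

int main(void)
{
	printf("-1: %s\n", describe_lock_count(-1));
	printf(" 0: %s\n", describe_lock_count(0));
	printf(" 3: %s\n", describe_lock_count(3));
	return 0;
}
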
766 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); in afs_lock() local
771 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_lock()
779 trace_afs_flock_op(vnode, fl, afs_flock_op_lock); in afs_lock()
792 trace_afs_flock_op(vnode, fl, op); in afs_lock()
801 struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); in afs_flock() local
806 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_flock()
820 trace_afs_flock_op(vnode, fl, afs_flock_op_flock); in afs_flock()
834 trace_afs_flock_op(vnode, fl, op); in afs_flock()
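
afs_lock() and afs_flock() are the ->lock and ->flock file operations, i.e. what fcntl() byte-range locks and flock() whole-file locks reach from user space on an AFS mount. A small user-space exercise of both paths; the path under /afs is a placeholder.

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder path: any file on an AFS mount will do. */
	int fd = open("/afs/example.org/somefile", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* POSIX byte-range lock: serviced by the ->lock handler. */
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 0,	/* 0 length = whole file */
	};
	if (fcntl(fd, F_SETLKW, &fl) == -1)
		perror("fcntl(F_SETLKW)");
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);

	/* BSD whole-file lock: serviced by the ->flock handler. */
	if (flock(fd, LOCK_EX) == -1)
		perror("flock(LOCK_EX)");
	flock(fd, LOCK_UN);

	close(fd);
	return 0;
}
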
846 struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file)); in afs_fl_copy_lock() local
852 spin_lock(&vnode->lock); in afs_fl_copy_lock()
853 trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock); in afs_fl_copy_lock()
855 spin_unlock(&vnode->lock); in afs_fl_copy_lock()
864 struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file)); in afs_fl_release_private() local
868 spin_lock(&vnode->lock); in afs_fl_release_private()
870 trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock); in afs_fl_release_private()
872 if (list_empty(&vnode->granted_locks)) in afs_fl_release_private()
873 afs_defer_unlock(vnode); in afs_fl_release_private()
875 _debug("state %u for %p", vnode->lock_state, vnode); in afs_fl_release_private()
876 spin_unlock(&vnode->lock); in afs_fl_release_private()