===========================
Unreliable Guide To Locking
===========================

:Author: Rusty Russell

The Problem With Concurrency
============================

(Skip this if you know what a Race Condition is).

In a normal program, you can increment a counter like so::

          very_important_count++;

This is what they would expect to happen:

.. table:: Expected Results

  +------------------------------------+------------------------------------+
  | Instance 1                         | Instance 2                         |
  +====================================+====================================+
  | read very_important_count (5)      |                                    |
  +------------------------------------+------------------------------------+
  | add 1 (6)                          |                                    |
  +------------------------------------+------------------------------------+
  | write very_important_count (6)     |                                    |
  +------------------------------------+------------------------------------+
  |                                    | read very_important_count (6)      |
  +------------------------------------+------------------------------------+
  |                                    | add 1 (7)                          |
  +------------------------------------+------------------------------------+
  |                                    | write very_important_count (7)     |
  +------------------------------------+------------------------------------+

This is what might happen:

.. table:: Possible Results

  +------------------------------------+------------------------------------+
  | Instance 1                         | Instance 2                         |
  +====================================+====================================+
  | read very_important_count (5)      |                                    |
  +------------------------------------+------------------------------------+
  |                                    | read very_important_count (5)      |
  +------------------------------------+------------------------------------+
  | add 1 (6)                          |                                    |
  +------------------------------------+------------------------------------+
  |                                    | add 1 (6)                          |
  +------------------------------------+------------------------------------+
  | write very_important_count (6)     |                                    |
  +------------------------------------+------------------------------------+
  |                                    | write very_important_count (6)     |
  +------------------------------------+------------------------------------+

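The classic cure is to make the read-modify-write a single critical
region. As a minimal sketch (the spinlock here is our own illustrative
addition, not part of the original example; for two user contexts a
mutex would do equally well)::

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(vic_lock);
    static int very_important_count;

    void increment_count(void)
    {
            spin_lock(&vic_lock);   /* only one holder can pass this point */
            very_important_count++; /* read, add and write happen as a unit */
            spin_unlock(&vic_lock);
    }
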
Race Conditions and Critical Regions
------------------------------------

This overlap, where the result depends on the relative timing of
multiple tasks, is called a race condition. The piece of code containing
the concurrency issue is called a critical region. The solution is to
recognize when these simultaneous accesses occur, and use locks to make
sure that only one instance can enter the critical region at any time.

Locking in the Linux Kernel
===========================

Two Main Types of Kernel Locks: Spinlocks and Mutexes
-----------------------------------------------------

There are two main types of kernel locks. The fundamental type is the
spinlock (``include/asm/spinlock.h``), which is a very simple
single-holder lock: if you can't get the spinlock, you keep trying
(spinning) until you can. Spinlocks are very small and fast, and can be
used anywhere.

The second type is a mutex (``include/linux/mutex.h``): it is like a
spinlock, but you may block holding a mutex. If you can't lock a mutex,
your task will suspend itself, and be woken up when the mutex is
released. This means the CPU can do something else while you are
waiting. There are many cases when you simply can't sleep, and so have
to use a spinlock instead. Neither type of lock is recursive: see
`Deadlock: Simple and Advanced`_.

Locks and Uniprocessor Kernels
------------------------------

For kernels compiled without ``CONFIG_SMP``, and without
``CONFIG_PREEMPT``, spinlocks do not exist at all. This is an excellent
design decision: when no-one else can run at the same time, there is no
reason to have a lock.

If the kernel is compiled without ``CONFIG_SMP``, but ``CONFIG_PREEMPT``
is set, then spinlocks simply disable preemption, which is sufficient to
prevent any races. For most purposes, we can think of preemption as
equivalent to SMP, and not worry about it separately.

Locking Only In User Context
----------------------------

If you have a data structure which is only ever accessed from user
context, then you can use a simple mutex (``include/linux/mutex.h``) to
protect it. This is the most trivial case: you initialize the mutex,
then call mutex_lock_interruptible() to grab it and
mutex_unlock() to release it.

Example: ``net/netfilter/nf_sockopt.c`` allows registration of new
setsockopt() and getsockopt() calls, with
nf_register_sockopt(). Registration and de-registration
are only done on module load and unload (and boot time, where there is
no concurrency), and the list of registrations is only consulted for an
unknown setsockopt() or getsockopt() call. A mutex is perfect to protect
this, especially since the setsockopt call may well sleep.

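As a sketch of this pattern (the names below are our own illustration,
not the actual netfilter code)::

    #include <linux/list.h>
    #include <linux/mutex.h>

    /* A registration list only ever touched from user context
     * (module load/unload, syscalls), so a mutex suffices and
     * callers are allowed to sleep. */
    static DEFINE_MUTEX(reg_mutex);
    static LIST_HEAD(reg_list);

    struct my_ops {
            struct list_head list;
            int id;
    };

    void register_my_ops(struct my_ops *ops)
    {
            mutex_lock(&reg_mutex);
            list_add(&ops->list, &reg_list);
            mutex_unlock(&reg_mutex);
    }

    void unregister_my_ops(struct my_ops *ops)
    {
            mutex_lock(&reg_mutex);
            list_del(&ops->list);
            mutex_unlock(&reg_mutex);
    }
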
Locking Between User Context and Softirqs
-----------------------------------------

If a softirq shares data with user context, you have two problems.
Firstly, the current user context can be interrupted by a softirq, and
secondly, the critical region could be entered from another CPU. This is
where spin_lock_bh() (``include/linux/spinlock.h``) is used. It disables
softirqs on that CPU, then grabs the lock. spin_unlock_bh() does the
reverse.

This works perfectly for UP as well: the spin lock vanishes, and this
macro simply becomes local_bh_disable()
(``include/linux/interrupt.h``), which protects you from the softirq
being run.

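A minimal sketch of the two sides (our own illustration)::

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(queue_lock);
    static LIST_HEAD(queue);

    /* User context side: disable softirqs on this CPU, then take the
     * lock, so the softirq below can neither interrupt us here nor
     * race with us from another CPU. */
    void queue_item(struct list_head *item)
    {
            spin_lock_bh(&queue_lock);
            list_add_tail(item, &queue);
            spin_unlock_bh(&queue_lock);
    }

    /* Softirq side: we are already in softirq context, so a plain
     * spin_lock() is enough to keep other CPUs out. */
    void drain_queue(void)
    {
            spin_lock(&queue_lock);
            while (!list_empty(&queue))
                    list_del(queue.next);
            spin_unlock(&queue_lock);
    }
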
Locking Between User Context and Tasklets
-----------------------------------------

This is exactly the same as above, because tasklets are actually run
from a softirq.

Locking Between User Context and Timers
---------------------------------------

This, too, is exactly the same as above, because timers are actually run
from a softirq.

Locking Between Tasklets/Timers
-------------------------------

Sometimes a tasklet or timer might want to share data with another
tasklet or timer. If it is the same tasklet or timer, no lock is needed:
it never runs on two CPUs at once. If the data is shared between
different tasklets or timers, you need spin_lock() and spin_unlock():
using spin_lock_bh() would be wasteful, as you are already in a
softirq-style context.

Locking Between Softirqs
------------------------

Often a softirq might want to share data with itself or a tasklet/timer.

The Same Softirq
~~~~~~~~~~~~~~~~

The same softirq can run on the other CPUs: you can use a per-CPU array
(see `Per-CPU Data`_) for better performance. If you're
going so far as to use a softirq, you probably care about scalable
performance enough to justify the extra complexity. You'll need to use
spin_lock() and spin_unlock() for shared data.

Different Softirqs
~~~~~~~~~~~~~~~~~~

You'll need to use spin_lock() and spin_unlock() for shared data,
since the other softirq could be running on a different CPU.

Hard IRQ Context
================

Hardware interrupts usually communicate with a tasklet or softirq.
Frequently this involves putting work in a queue, which the softirq will
take out.

Locking Between Hard IRQ and Softirqs/Tasklets
----------------------------------------------

If a hardware irq handler shares data with a softirq, you have two
concerns. Firstly, the softirq processing can be interrupted by a
hardware interrupt, and secondly, the critical region could be entered
by a hardware interrupt on another CPU. This is where
spin_lock_irq() comes in handy: it disables interrupts on
that CPU, then grabs the lock. spin_unlock_irq() does the
reverse.

spin_lock_irqsave() is a variant which saves whether
interrupts were on or off in a flags word, which is passed to
spin_unlock_irqrestore(). This means that the same code
can be used inside a hard irq handler (where interrupts are already off)
and in softirqs (where the irq disabling is required).

This works perfectly for UP as well: the spin lock vanishes, and this
macro simply becomes local_irq_disable(), which protects
you from the softirq/tasklet/BH being run.

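A short sketch of the pattern (our own illustration, assuming a
hypothetical device)::

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(dev_lock);
    static unsigned int pending;

    /* Hard irq handler: nothing on this CPU can interrupt us, so a
     * plain spin_lock() keeps handlers on other CPUs out. */
    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            spin_lock(&dev_lock);
            pending++;
            spin_unlock(&dev_lock);
            return IRQ_HANDLED;
    }

    /* Softirq or user side: must also keep the irq handler out, so
     * disable interrupts on this CPU while holding the lock. */
    void process_pending(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev_lock, flags);
            pending = 0;
            spin_unlock_irqrestore(&dev_lock, flags);
    }
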
Locking Between Two Hard IRQ Handlers
-------------------------------------

It is rare to have to share data between two IRQ handlers, but if you
do, spin_lock_irqsave() should be used: it is
architecture-specific whether all interrupts are disabled inside irq
handlers themselves.

Cheat Sheet For Locking
=======================

Pete Zaitcev gives the following summary:

- If you are in a process context (any syscall) and want to lock other
  process out, use a mutex. You can take a mutex and sleep
  (copy_from_user() or kmalloc(GFP_KERNEL)).

- Otherwise (== data can be touched in an interrupt), use
  spin_lock_irqsave() and spin_unlock_irqrestore().

- Avoid holding spinlock for more than 5 lines of code and across any
  function call (except accessors like readb()).

Table of Minimum Requirements
-----------------------------

The following table lists the **minimum** locking requirements between
various contexts. In some cases, the same context can only be running on
one CPU at a time, so no locking is required for that context (eg. a
particular thread can only run on one CPU at a time, but if it needs to
share data with another thread, locking is required).

Remember the advice above: you can always use
spin_lock_irqsave(), which is a superset of all other
spinlock primitives.

============== ============= ============= ============= ============= ============= ============= ============= ============= ============== ==============
.              IRQ Handler A IRQ Handler B Softirq A     Softirq B     Tasklet A     Tasklet B     Timer A       Timer B       User Context A User Context B
IRQ Handler A  None
IRQ Handler B  SLIS          None
Softirq A      SLI           SLI           SL
Softirq B      SLI           SLI           SL            SL
Tasklet A      SLI           SLI           SL            SL            None
Tasklet B      SLI           SLI           SL            SL            SL            None
Timer A        SLI           SLI           SL            SL            SL            SL            None
Timer B        SLI           SLI           SL            SL            SL            SL            SL            None
User Context A SLI           SLI           SLBH          SLBH          SLBH          SLBH          SLBH          SLBH          None
User Context B SLI           SLI           SLBH          SLBH          SLBH          SLBH          SLBH          SLBH          MLI            None
============== ============= ============= ============= ============= ============= ============= ============= ============= ============== ==============

Table: Table of Locking Requirements

+--------+----------------------------+
| SLIS   | spin_lock_irqsave          |
+--------+----------------------------+
| SLI    | spin_lock_irq              |
+--------+----------------------------+
| SL     | spin_lock                  |
+--------+----------------------------+
| SLBH   | spin_lock_bh               |
+--------+----------------------------+
| MLI    | mutex_lock_interruptible   |
+--------+----------------------------+

Table: Legend for Locking Requirements Table

The trylock Functions
=====================

There are functions that try to acquire a lock only once and immediately
return a value telling about success or failure to acquire the lock.
They can be used if you need no access to the data protected with the
lock when some other thread is holding the lock.

spin_trylock() does not spin but returns non-zero if it
acquires the spinlock on the first try or 0 if not. This function can be
used in all contexts like spin_lock(): you must have
disabled the contexts that might interrupt you and acquire the spin
lock.

mutex_trylock() does not suspend your task but returns
non-zero if it could lock the mutex on the first try or 0 if not. This
function cannot be safely used in hardware or software interrupt
contexts despite not sleeping.

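A small sketch of opportunistic locking with spin_trylock()
(our own illustration)::

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);
    static unsigned long stats_value;
    static atomic_t stats_dropped = ATOMIC_INIT(0);

    void try_update_stats(void)
    {
            if (spin_trylock(&stats_lock)) {
                    stats_value++;              /* got the lock: safe update */
                    spin_unlock(&stats_lock);
            } else {
                    atomic_inc(&stats_dropped); /* busy: record the miss
                                                 * instead of spinning */
            }
    }
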
Common Examples
===============

Let's step through a simple example: a cache of number to name mappings.
The cache keeps a count of how often each of the objects is used, and
when it gets full, throws out the least used one.

All In User Context
-------------------

For our first example, we assume that all operations are in user context
(ie. from system calls), so we can sleep. This means we can use a mutex
to protect the cache and all the objects within it. Here's the code::

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/mutex.h>
    #include <asm/errno.h>

    struct object
    {
            struct list_head list;
            int id;
            char name[32];
            int popularity;
    };

    /* Protects the cache, cache_num, and the objects within it */
    static DEFINE_MUTEX(cache_lock);
    static LIST_HEAD(cache);
    static unsigned int cache_num = 0;
    #define MAX_CACHE_SIZE 10

    /* Must be holding cache_lock */
    static struct object *__cache_find(int id)
    {
            struct object *i;

            list_for_each_entry(i, &cache, list) {
                    if (i->id == id) {
                            i->popularity++;
                            return i;
                    }
            }
            return NULL;
    }

    /* Must be holding cache_lock */
    static void __cache_delete(struct object *obj)
    {
            BUG_ON(!obj);
            list_del(&obj->list);
            kfree(obj);
            cache_num--;
    }

    /* Must be holding cache_lock */
    static void __cache_add(struct object *obj)
    {
            list_add(&obj->list, &cache);
            if (++cache_num > MAX_CACHE_SIZE) {
                    struct object *i, *outcast = NULL;
                    list_for_each_entry(i, &cache, list) {
                            if (!outcast || i->popularity < outcast->popularity)
                                    outcast = i;
                    }
                    __cache_delete(outcast);
            }
    }

    int cache_add(int id, const char *name)
    {
            struct object *obj;

            if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
                    return -ENOMEM;

            strscpy(obj->name, name, sizeof(obj->name));
            obj->id = id;
            obj->popularity = 0;

            mutex_lock(&cache_lock);
            __cache_add(obj);
            mutex_unlock(&cache_lock);
            return 0;
    }

    void cache_delete(int id)
    {
            mutex_lock(&cache_lock);
            __cache_delete(__cache_find(id));
            mutex_unlock(&cache_lock);
    }

    int cache_find(int id, char *name)
    {
            struct object *obj;
            int ret = -ENOENT;

            mutex_lock(&cache_lock);
            obj = __cache_find(id);
            if (obj) {
                    ret = 0;
                    strcpy(name, obj->name);
            }
            mutex_unlock(&cache_lock);
            return ret;
    }

Note that I always make sure I have the cache_lock when I add, delete,
or look up the cache: the cache infrastructure itself and the contents
of the objects are both protected by the lock.

There is a slight (and common) optimization here: in
cache_add() we set up the fields of the object before
grabbing the lock. This is safe, as no-one else can access it until we
put it in cache.

Accessing From Interrupt Context
--------------------------------

Now consider the case where cache_find() can be called
from interrupt context: either a hardware interrupt or a softirq. An
example would be a timer which deletes object from the cache.

The change is shown below, in standard patch format: the ``-`` are lines
which are taken away, and the ``+`` are lines which are added::

    --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
    +++ cache.c.interrupt   2003-12-09 14:07:49.000000000 +1100
    @@ -12,7 +12,7 @@
             int popularity;
     };

    -static DEFINE_MUTEX(cache_lock);
    +static DEFINE_SPINLOCK(cache_lock);
     static LIST_HEAD(cache);
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10
    @@ -55,6 +55,7 @@
     int cache_add(int id, const char *name)
     {
             struct object *obj;
    +        unsigned long flags;

             if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
                     return -ENOMEM;
    @@ -63,30 +64,33 @@
             obj->id = id;
             obj->popularity = 0;

    -        mutex_lock(&cache_lock);
    +        spin_lock_irqsave(&cache_lock, flags);
             __cache_add(obj);
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
             return 0;
     }

     void cache_delete(int id)
     {
    -        mutex_lock(&cache_lock);
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
             __cache_delete(__cache_find(id));
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
     }

     int cache_find(int id, char *name)
     {
             struct object *obj;
             int ret = -ENOENT;
    +        unsigned long flags;

    -        mutex_lock(&cache_lock);
    +        spin_lock_irqsave(&cache_lock, flags);
             obj = __cache_find(id);
             if (obj) {
                     ret = 0;
                     strcpy(name, obj->name);
             }
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
             return ret;
     }

Note that the spin_lock_irqsave() will turn off
interrupts if they are on, otherwise does nothing (if we are already in
an interrupt handler), hence these functions are safe to call from any
context.

Unfortunately, cache_add() calls kmalloc()
with the ``GFP_KERNEL`` flag, which is only legal in user context. I
have assumed that cache_add() is still only called in
user context, otherwise this should become a parameter to
cache_add().

Exposing Objects Outside This File
----------------------------------

If our objects contained more information, it might not be sufficient to
copy the information in and out: other parts of the code might want to
keep pointers to these objects, for example, rather than looking up the
id every time. This produces two problems.

The first problem is that we use the cache_lock to protect objects:
we'd need to make this non-static so the rest of the code can use it.
This makes locking trickier, as it is no longer all in one place.

The second problem is the lifetime problem: if another structure keeps a
pointer to an object, it presumably expects that pointer to remain
valid. Unfortunately, this is only guaranteed while you hold the lock,
otherwise someone might call cache_delete() on it, and
worse, add another object, re-using the same address.

As there is only one lock, you can't hold it forever: no-one else would
get any work done.

The solution to this problem is to use a reference count: everyone who
has a pointer to the object increases it when they first get the object,
and drops the reference count when they're finished with it. Whoever
drops it to zero knows it is unused, and can actually delete it.

Here is the code::

    --- cache.c.interrupt   2003-12-09 14:25:43.000000000 +1100
    +++ cache.c.refcnt      2003-12-09 14:33:05.000000000 +1100
    @@ -7,6 +7,7 @@
     struct object
     {
             struct list_head list;
    +        unsigned int refcnt;
             int id;
             char name[32];
             int popularity;
    @@ -17,6 +18,35 @@
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10

    +static void __object_put(struct object *obj)
    +{
    +        if (--obj->refcnt == 0)
    +                kfree(obj);
    +}
    +
    +static void __object_get(struct object *obj)
    +{
    +        obj->refcnt++;
    +}
    +
    +void object_put(struct object *obj)
    +{
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
    +        __object_put(obj);
    +        spin_unlock_irqrestore(&cache_lock, flags);
    +}
    +
    +void object_get(struct object *obj)
    +{
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
    +        __object_get(obj);
    +        spin_unlock_irqrestore(&cache_lock, flags);
    +}
    +
     /* Must be holding cache_lock */
     static struct object *__cache_find(int id)
     {
    @@ -35,7 +65,7 @@
     {
             BUG_ON(!obj);
             list_del(&obj->list);
    -        kfree(obj);
    +        __object_put(obj);      /* The cache's reference is gone */
             cache_num--;
     }
    @@ -63,6 +94,7 @@
             strscpy(obj->name, name, sizeof(obj->name));
             obj->id = id;
             obj->popularity = 0;
    +        obj->refcnt = 1; /* The cache holds a reference */

             spin_lock_irqsave(&cache_lock, flags);
    @@ -79,18 +111,15 @@
     }

    -int cache_find(int id, char *name)
    +struct object *cache_find(int id)
     {
             struct object *obj;
    -        int ret = -ENOENT;
             unsigned long flags;

             spin_lock_irqsave(&cache_lock, flags);
             obj = __cache_find(id);
    -        if (obj) {
    -                ret = 0;
    -                strcpy(name, obj->name);
    -        }
    +        if (obj)
    +                __object_get(obj);
             spin_unlock_irqrestore(&cache_lock, flags);
    -        return ret;
    +        return obj;
     }

We encapsulate the reference counting in the standard 'get' and 'put'
functions. Now we can return the object itself from
cache_find(), which has the advantage that the user can
now sleep holding the object (eg. to copy_to_user() the
name to userspace). The reference count is 1 when the object is first
inserted, since the cache itself holds a reference.

Using Atomic Operations For The Reference Count
-----------------------------------------------

In practice, ``atomic_t`` would usually be used for refcnt. There are a
number of atomic operations defined in ``include/asm/atomic.h``: these
are guaranteed to be seen atomically from all CPUs in the system, so no
lock is required. In this case, it is simpler than using spinlocks,
although for anything non-trivial using spinlocks is clearer. The
atomic_inc() and atomic_dec_and_test() are
used instead of the standard increment and decrement operators, and the
lock is no longer used to protect the reference count itself::

    --- cache.c.refcnt      2003-12-09 15:00:35.000000000 +1100
    +++ cache.c.refcnt-atomic       2003-12-11 15:49:42.000000000 +1100
    @@ -7,7 +7,7 @@
     struct object
     {
             struct list_head list;
    -        unsigned int refcnt;
    +        atomic_t refcnt;
             int id;
             char name[32];
             int popularity;
    @@ -18,33 +18,15 @@
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10

    -static void __object_put(struct object *obj)
    -{
    -        if (--obj->refcnt == 0)
    -                kfree(obj);
    -}
    -
    -static void __object_get(struct object *obj)
    -{
    -        obj->refcnt++;
    -}
    -
     void object_put(struct object *obj)
     {
    -        unsigned long flags;
    -
    -        spin_lock_irqsave(&cache_lock, flags);
    -        __object_put(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        if (atomic_dec_and_test(&obj->refcnt))
    +                kfree(obj);
     }

     void object_get(struct object *obj)
     {
    -        unsigned long flags;
    -
    -        spin_lock_irqsave(&cache_lock, flags);
    -        __object_get(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        atomic_inc(&obj->refcnt);
     }

    @@ -65,7 +47,7 @@
     {
             BUG_ON(!obj);
             list_del(&obj->list);
    -        __object_put(obj);
    +        object_put(obj);
             cache_num--;
     }
    @@ -94,7 +76,7 @@
             strscpy(obj->name, name, sizeof(obj->name));
             obj->id = id;
             obj->popularity = 0;
    -        obj->refcnt = 1; /* The cache holds a reference */
    +        atomic_set(&obj->refcnt, 1); /* The cache holds a reference */

             spin_lock_irqsave(&cache_lock, flags);
    @@ -119,7 +101,7 @@
             obj = __cache_find(id);
             if (obj)
    -                __object_get(obj);
    +                object_get(obj);
             spin_unlock_irqrestore(&cache_lock, flags);

Protecting The Objects Themselves
---------------------------------

In these examples, we assumed that the objects (except the reference
counts) never changed once they are created. If we wanted to allow the
name to change, there are three possibilities:

- You can make ``cache_lock`` non-static, and tell people to grab that
  lock before changing the name in any object.

- You can provide a cache_obj_rename() which grabs this
  lock and changes the name for the caller, and tell everyone to use
  that function.

- You can make the ``cache_lock`` protect only the cache itself, and
  use another lock to protect the name.

Theoretically, you can make the locks as fine-grained as one lock for
every field, for every object. In practice, the most common variants
are:

- One lock which protects the infrastructure (the ``cache`` list in
  this example) and all the objects. This is what we have done so far.

- One lock which protects the infrastructure (including the list
  pointers inside the objects), and one lock inside the object which
  protects the rest of that object.

- Multiple locks to protect the infrastructure (eg. one lock per hash
  chain), possibly with a separate per-object lock.

Here is the "lock-per-object" implementation::

    --- cache.c.refcnt-atomic       2003-12-11 15:50:54.000000000 +1100
    +++ cache.c.perobjectlock       2003-12-11 17:15:03.000000000 +1100
    @@ -6,11 +6,17 @@

     struct object
     {
    +        /* These two protected by cache_lock. */
             struct list_head list;
    +        int popularity;
    +
             atomic_t refcnt;
    +
    +        /* Doesn't change once created. */
             int id;
    +
    +        spinlock_t lock; /* Protects the name */
             char name[32];
    -        int popularity;
     };

     static DEFINE_SPINLOCK(cache_lock);
    @@ -77,6 +84,7 @@
             obj->id = id;
             obj->popularity = 0;
             atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
    +        spin_lock_init(&obj->lock);

             spin_lock_irqsave(&cache_lock, flags);

Note that I decide that the popularity count should be protected by the
``cache_lock`` rather than the per-object lock: this is because it (like
the struct list_head inside the object) is logically part
of the infrastructure. This way, I don't need to grab the lock of every
object in __cache_add() when seeking the least popular.

I also decided that the id member is unchangeable, so I don't need to
grab each object lock in __cache_find() to examine the
id: the object lock is only used by a caller who wants to read or write
the name field.

Common Problems
===============

Deadlock: Simple and Advanced
-----------------------------

There is a coding bug where a piece of code tries to grab a spinlock
twice: it will spin forever, waiting for the lock to be released
(spinlocks, rwlocks and mutexes are not recursive in Linux). This is
trivial to diagnose: not a
stay-up-five-nights-talk-to-fluffy-code-bunnies kind of problem.

For a slightly more complex case, imagine you have a region shared by a
softirq and user context. If you use a spin_lock() call
to protect it, it is possible that the user context will be interrupted
by the softirq while it holds the lock, and the softirq will then spin
forever trying to get the same lock.

A more complex problem is the so-called 'deadly embrace', involving two
or more locks. Say you have a hash table: each entry in the table is a
spinlock, and a chain of hashed objects. Inside a softirq handler, you
sometimes want to alter an object from one place in the hash to another:
you grab the spinlock of the old hash chain and the spinlock of the new
hash chain, and delete the object from the old one, and insert it in the
new one.

There are two problems here. First, if your code ever tries to move the
object to the same chain, it will deadlock with itself as it tries to
lock it twice. Secondly, if the same softirq on another CPU is trying to
move another object in the reverse direction, the following could
happen:

+-----------------------+-----------------------+
| CPU 1                 | CPU 2                 |
+=======================+=======================+
| Grab lock A -> OK     | Grab lock B -> OK     |
+-----------------------+-----------------------+
| Grab lock B -> spin   | Grab lock A -> spin   |
+-----------------------+-----------------------+

Table: Consequences

The two CPUs will spin forever, waiting for the other to give up their
lock. It will look, smell, and feel like a crash.

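One conventional escape from the deadly embrace, sketched below as our
own illustration (not part of the original example), is to impose a
single global order on the two chain locks, eg. by comparing their
addresses, so every CPU acquires them in the same order::

    #include <linux/spinlock.h>

    /* Lock two hash-chain locks without deadlocking: handle the
     * same-chain case, and always take the lower address first. */
    static void lock_two_chains(spinlock_t *a, spinlock_t *b)
    {
            if (a == b) {
                    spin_lock(a);
            } else if (a < b) {
                    spin_lock(a);
                    spin_lock_nested(b, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock(b);
                    spin_lock_nested(a, SINGLE_DEPTH_NESTING);
            }
    }

    static void unlock_two_chains(spinlock_t *a, spinlock_t *b)
    {
            spin_unlock(a);
            if (a != b)
                    spin_unlock(b);
    }
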
Preventing Deadlock
-------------------

Textbooks will tell you that if you always lock in the same order, you
will never get this kind of deadlock. Practice will tell you that this
approach doesn't scale: when I create a new lock, I don't understand
enough of the kernel to figure out where in the 5000 lock hierarchy it
will fit.

The best locks are encapsulated: they
are never held around calls to non-trivial functions outside the same
file. You can read through this code and see that it will never
deadlock, because it never tries to grab another lock while it has that
one. People using your code don't even need to know you are using a
lock.

Racing Timers: A Kernel Pastime
-------------------------------

Timers can produce their own special problems with races. Consider a
collection of objects (list, hash, etc) where each object has a timer
which is due to destroy it.

If you want to destroy the entire collection (say on module removal),
you might do the following::

            /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD
               USE HUNGARIAN NOTATION */
            spin_lock_bh(&list_lock);

            while (list) {
                    struct foo *next = list->next;
                    del_timer(&list->timer);
                    kfree(list);
                    list = next;
            }

            spin_unlock_bh(&list_lock);

Sooner or later, this will crash on SMP, because a timer can have just
gone off before the spin_lock_bh(), and it will only get
the lock after we spin_unlock_bh(), and then try to free
the element (which has already been freed!).

This can be avoided by checking the result of
del_timer(): if it returns 1, the timer has been deleted.
If 0, it means (in this case) that it is currently running, so we can
do::

            retry:
                    spin_lock_bh(&list_lock);

                    while (list) {
                            struct foo *next = list->next;
                            if (!del_timer(&list->timer)) {
                                    /* Give timer a chance to delete this */
                                    spin_unlock_bh(&list_lock);
                                    goto retry;
                            }
                            kfree(list);
                            list = next;
                    }

                    spin_unlock_bh(&list_lock);

Another common problem is deleting timers which restart themselves (by
calling add_timer() at the end of their timer function).
Because this is a fairly common case which is prone to races, you should
use del_timer_sync() (``include/linux/timer.h``) to
handle this case.

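As a short sketch of the del_timer_sync() case (our own
illustration, using the timer_setup() interface)::

    #include <linux/timer.h>

    static struct timer_list poll_timer;

    static void poll_fn(struct timer_list *t)
    {
            /* ... do the periodic work ... */
            mod_timer(&poll_timer, jiffies + HZ);   /* re-arms itself */
    }

    void poll_start(void)
    {
            timer_setup(&poll_timer, poll_fn, 0);
            mod_timer(&poll_timer, jiffies + HZ);
    }

    void poll_stop(void)
    {
            /* Deactivates the timer and waits for a running handler to
             * finish, retrying if it re-armed itself. Must not be
             * called while holding a lock the handler also takes. */
            del_timer_sync(&poll_timer);
    }
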
Locking Speed
=============

There are three main things to worry about when considering the speed of
some code which does locking. First is concurrency: how many things are
going to be waiting while someone else is holding a lock. Second is the
time taken to actually acquire and release an uncontended lock. Third is
using fewer, or smarter, locks.

Acquisition times depend on how much damage the lock operations do to
the pipeline (pipeline stalls) and how likely it is that this CPU was
the last one to grab the lock (ie. is the lock cache-hot for this CPU):
on a machine with more CPUs, this likelihood drops fast. Consider a
700MHz Intel Pentium III: an instruction takes about 0.7ns, an atomic
increment takes about 58ns, a lock which is cache-hot on this CPU takes
160ns, and a cacheline transfer from another CPU takes an additional 170
to 360ns (these figures are from Paul McKenney's Linux Journal RCU
article).

These aims can conflict: holding a lock for a short time might be done
by splitting locks into parts (such as in our final per-object-lock
example), but this increases the number of lock acquisitions, and the
results are often slower than having a single lock. This is another
reason to advocate locking simplicity.

Read/Write Lock Variants
------------------------

Both spinlocks and mutexes have read/write variants: ``rwlock_t`` and
struct rw_semaphore. These divide users into two classes:
the readers and the writers. If you are only reading the data, you can
get a read lock, but to write to the data you need the write lock. Many
people can hold a read lock, but a writer must be sole holder.

Avoiding Locks: Read Copy Update
--------------------------------

There is a special method of read/write locking called Read Copy
Update. Using RCU, the readers can avoid taking a lock altogether: as we
expect our cache to be read more often than updated, it is a candidate
for this optimization.

How do we get rid of read locks? Getting rid of read locks means that
writers may be changing the list underneath the readers. That is
actually quite simple: we can read a linked list while an element is
being added if the writer adds the element very carefully. For example,
adding ``new`` to a single linked list called ``list``::

            new->next = list->next;
            wmb();
            list->next = new;

The wmb() is a write memory barrier. It ensures that the
first operation (setting the new element's ``next`` pointer) is complete
and will be seen by all CPUs before the second operation (putting the
new element in the list). Fortunately, there is a function to do this
for standard struct list_head lists:
list_add_rcu() (``include/linux/list.h``).

Removing an element from the list is even simpler: we replace the
pointer to the old element with a pointer to its successor, and readers
will either see it, or skip over it::

            list->next = old->next;

There is list_del_rcu() (``include/linux/list.h``) which
does this (the normal version poisons the old pointer, which we don't
want).

The reader must also be careful: some CPUs can look at the ``next``
pointer to start reading the contents of the next element early, but
don't realize that the pre-fetched contents is wrong when the ``next``
pointer changes underneath them. Once again, there is a
list_for_each_entry_rcu() (``include/linux/list.h``) to
help you.

Our final dilemma is this: when can we actually destroy the removed
element? Remember, a reader might be stepping through this element in
the list right now. We use call_rcu() to register a
callback which will actually destroy the object once all pre-existing
readers are finished. Alternatively, synchronize_rcu() may
be used to block until all pre-existing readers are finished.

But how does RCU know when the readers are finished? The readers always
traverse the list inside rcu_read_lock()/rcu_read_unlock()
pairs: these simply disable preemption, so the reader won't go to sleep
while reading the list. RCU then waits until every other CPU has slept
at least once: since readers cannot sleep, any readers which were
traversing the list during the deletion must be finished, and the
callback is triggered.

Here is the "RCU" version of our cache::

    --- cache.c.perobjectlock       2003-12-11 17:15:03.000000000 +1100
    +++ cache.c.rcupdate    2003-12-11 17:55:14.000000000 +1100
    @@ -1,15 +1,18 @@
     #include <linux/list.h>
    +#include <linux/rcupdate.h>
     #include <linux/slab.h>
     #include <linux/string.h>
     #include <linux/mutex.h>
     #include <asm/errno.h>

     struct object
     {
    -        /* These two protected by cache_lock. */
    +        /* This is protected by RCU */
             struct list_head list;
    +        struct rcu_head rcu;
    +
             int popularity;

             atomic_t refcnt;
    @@ -40,7 +43,7 @@
     {
             struct object *i;

    -        list_for_each_entry(i, &cache, list) {
    +        list_for_each_entry_rcu(i, &cache, list) {
                     if (i->id == id) {
                             i->popularity++;
                             return i;
    @@ -49,19 +52,25 @@
     }

    +static void cache_delete_rcu(struct rcu_head *head)
    +{
    +        struct object *obj = container_of(head, struct object, rcu);
    +
    +        object_put(obj);
    +}
    +
     /* Must be holding cache_lock */
     static void __cache_delete(struct object *obj)
     {
             BUG_ON(!obj);
    -        list_del(&obj->list);
    -        object_put(obj);
    +        list_del_rcu(&obj->list);
             cache_num--;
    +        call_rcu(&obj->rcu, cache_delete_rcu);
     }

     /* Must be holding cache_lock */
     static void __cache_add(struct object *obj)
     {
    -        list_add(&obj->list, &cache);
    +        list_add_rcu(&obj->list, &cache);
             if (++cache_num > MAX_CACHE_SIZE) {
    @@ -104,12 +114,11 @@
     struct object *cache_find(int id)
     {
             struct object *obj;
    -        unsigned long flags;

    -        spin_lock_irqsave(&cache_lock, flags);
    +        rcu_read_lock();
             obj = __cache_find(id);
             if (obj)
                     object_get(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        rcu_read_unlock();
             return obj;
     }

Note that the reader will either see the old element or the new one:
never half of either. Now, because the 'read lock' in RCU is simply
disabling preemption, a caller which always has preemption disabled
between calling cache_find() and
object_put() does not need to actually get and put the
reference count: we could expose
__cache_find() by making it non-static, and such
callers could simply call that. The benefit here is that the reference
count is not written to: the object is not altered in any way, which is
much faster on SMP machines due to caching.

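As a sketch of such a lock-free reader (our own illustration, assuming
the RCU cache above with __cache_find() exposed)::

    #include <linux/printk.h>
    #include <linux/rcupdate.h>

    void report_name(int id)
    {
            struct object *obj;

            rcu_read_lock();        /* disables preemption: no refcount needed */
            obj = __cache_find(id);
            if (obj)
                    pr_info("id %d is %s\n", id, obj->name);
            rcu_read_unlock();      /* obj must not be touched after this */
    }
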
Per-CPU Data
------------

Another technique for avoiding locking which is used fairly widely is to
duplicate information for each CPU. For example, if you wanted to keep a
count of a common condition, you could use a spin lock and a single
counter. Nice and simple.

If that was too slow (it's usually not, but if you've got a really big
machine to test on and can show that it is), you could instead use a
counter for each CPU, then none of them need an exclusive lock. See
DEFINE_PER_CPU(), get_cpu_var() and
put_cpu_var() (``include/linux/percpu.h``).

Of particular use for simple per-cpu counters is the ``local_t`` type,
and the cpu_local_inc() and related functions, which are
more efficient than simple code on some architectures.

Note that there is no simple, reliable way of getting an exact value of
such a counter without introducing more locks. For some uses, this is
not a problem.

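A minimal sketch of a per-CPU counter (our own illustration, using the
this_cpu accessors)::

    #include <linux/percpu.h>

    /* Each CPU increments its own copy, so no lock is needed on the
     * fast path. */
    static DEFINE_PER_CPU(unsigned long, hit_count);

    void count_hit(void)
    {
            this_cpu_inc(hit_count);  /* safe against interrupts on this CPU */
    }

    /* Reading is only approximate: sum the per-CPU copies without
     * stopping the writers. */
    unsigned long read_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(hit_count, cpu);
            return sum;
    }
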
Data Which Mostly Used By An IRQ Handler
----------------------------------------

If data is always accessed from within the same IRQ handler, you don't
need a lock at all: the kernel already guarantees that the irq handler
will not run simultaneously on multiple CPUs. This only holds as long as
the data is never touched from any other context.

What Functions Are Safe To Call From Interrupts?
================================================

Many functions in the kernel sleep (ie. call schedule()) directly or
indirectly: you can never call them while holding a spinlock, or with
preemption disabled. This also means you need to be in user context:
calling them from an interrupt is illegal.

Some Functions Which Sleep
--------------------------

The most common ones are listed below, but you usually have to read the
code to find out if other calls are safe. A sketch of a typical
user-context path which uses them follows the list.

- Accesses to userspace:

  - copy_from_user()

  - copy_to_user()

  - get_user()

  - put_user()

- kmalloc(GFP_KERNEL)

- mutex_lock_interruptible() and
  mutex_lock()

  There is a mutex_trylock() which does not sleep.
  Still, it must not be used inside interrupt context since its
  implementation is not safe for that.

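A sketch of such a user-context path (our own illustration)::

    #include <linux/mutex.h>
    #include <linux/uaccess.h>

    static DEFINE_MUTEX(cfg_lock);
    static char cfg_buf[64];

    /* Runs in user context (eg. from a syscall), so it may take a
     * mutex and touch userspace memory, both of which can sleep.
     * None of this would be legal from an interrupt. */
    long set_config(const char __user *ubuf, size_t len)
    {
            if (len > sizeof(cfg_buf))
                    return -EINVAL;

            if (mutex_lock_interruptible(&cfg_lock))
                    return -ERESTARTSYS;

            if (copy_from_user(cfg_buf, ubuf, len)) {
                    mutex_unlock(&cfg_lock);
                    return -EFAULT;
            }

            mutex_unlock(&cfg_lock);
            return 0;
    }
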
Some Functions Which Don't Sleep
--------------------------------

Some functions are safe to call from any context, or holding almost any
lock.

- printk()

- kfree()

- add_timer() and del_timer()

Mutex API reference
===================

.. kernel-doc:: include/linux/mutex.h
   :internal:

.. kernel-doc:: kernel/locking/mutex.c
   :export:

Futex API reference
===================

.. kernel-doc:: kernel/futex/core.c
   :internal:

.. kernel-doc:: kernel/futex/futex.h
   :internal:

.. kernel-doc:: kernel/futex/pi.c
   :internal:

.. kernel-doc:: kernel/futex/requeue.c
   :internal:

.. kernel-doc:: kernel/futex/waitwake.c
   :internal:

Further reading
===============

- ``Documentation/locking/spinlocks.rst``: Linus Torvalds' spinlocking
  tutorial in the kernel sources.

- Unix Systems for Modern Architectures: Symmetric Multiprocessing and
  Caching for Kernel Programmers.

  Curt Schimmel's very good introduction to kernel level locking (not
  written for Linux, but nearly everything applies). The book is
  expensive, but really worth every penny to understand SMP locking.
  [ISBN: 0201633388]

Glossary
========

SMP
  Symmetric Multi-Processor: kernels compiled for multiple-CPU machines.
  (``CONFIG_SMP=y``).

tasklet
  A dynamically-registrable software interrupt, which is guaranteed to
  only run on one CPU at a time.

timer
  A dynamically-registrable software interrupt, which is run at (or close
  to) a given time. When running, it is just like a tasklet (in fact,
  they are called from the ``TIMER_SOFTIRQ``).

UP
  Uni-Processor: Non-SMP. (``CONFIG_SMP=n``).