/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

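/* Copy 'src' into 'dst', applying 'mask' one machine word at a time.
 * Illustrative example (values assumed, not from this file): with a
 * mask whose range covers only the IPv4 addresses and a netmask of
 * 255.255.0.0 on both, a key of 10.1.2.3 -> 10.4.5.6 is stored in
 * 'dst' as 10.1.0.0 -> 10.4.0.0.
 */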
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}

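/* Allocate a flow with a single pre-initialized stats node for CPU 0;
 * stats nodes for other CPUs are allocated on demand by the stats
 * update path (not in this file), which is why only CPU 0 is set in
 * 'cpu_used_mask' here.
 */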
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;

        flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->stats_last_writer = -1;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        cpumask_set_cpu(0, &flow->cpu_used_mask);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}

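/* Bucket arrays live in a flex_array so that large tables avoid
 * high-order contiguous allocations; each element is an hlist_head
 * anchoring the chain of flows hashed to that bucket.
 */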
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void flow_free(struct sw_flow *flow)
{
        int cpu;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[cpu]);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);

        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);

        if (!ti)
                return -ENOMEM;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
        return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

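/* Tear down a pair of table instances.  Unless 'keep_flows' says the
 * flows were handed off to a newer instance by a rehash, every flow is
 * unlinked from both the key table and, for UFID flows, the UFID table
 * before being freed.
 */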
static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
{
        int i;

        if (!ti)
                return;

        BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
}

/* No need for locking: this function is called only from an RCU callback
 * or from an error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        table_instance_destroy(ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                                (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

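/* Each flow embeds two hlist nodes per table so it can be linked into
 * the old and the new instance at the same time during a rehash;
 * 'node_ver' selects which node an instance uses.  The new instance
 * flips the version, so RCU readers can keep walking the old instance
 * while flows are inserted here.
 */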
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}

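/* Hashing and comparison of non-tunneled flows skip the tunnel key at
 * the front of 'struct sw_flow_key'; the start offset is rounded down
 * to a 'long' boundary to match the word-at-a-time loops in cmp_key()
 * and ovs_flow_mask_key().
 */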
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_proto)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

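/* Single-mask lookup: mask the key, hash only the bytes inside
 * 'mask->range', then scan the bucket chain, comparing just the flows
 * that were installed with this same mask.
 */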
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}

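/* Flow lookup as a tuple space search: try each mask on the list in
 * turn until one yields a match.  '*n_mask_hit' reports how many masks
 * were tried, which callers can use to account for lookup cost.
 */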
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
        }
        return NULL;
}

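/* Unique flow identifiers (UFIDs) are opaque, userspace-assigned ids
 * kept in a second hash table, letting userspace address a flow
 * without serializing the entire unmasked key.
 */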
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;

                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

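/* The key table is expanded when the flow count exceeds the number of
 * buckets, and is also rehashed with a fresh random seed every
 * REHASH_INTERVAL so chains stay short even for pathological key sets.
 */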
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}

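/* Typical insertion path (illustrative sketch; assumes the key, mask,
 * and actions were already validated by the netlink code):
 *
 *	ovs_lock();
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	if (err)
 *		goto err_unlock;
 *	...
 *	ovs_unlock();
 *
 * The mask argument is only a template: the table either takes a
 * reference on an equal mask already on its list or copies this one.
 */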
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_cpu_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}