1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_api.c  Packet action API.
4  *
5  * Author:      Jamal Hadi Salim
6  */
7
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <linux/kmod.h>
16 #include <linux/err.h>
17 #include <linux/module.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/act_api.h>
23 #include <net/netlink.h>
24
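/* Resolve a goto_chain control action: under RCU (BH) protection, point
 * res->goto_tp at the first filter of the destination chain so that
 * classification restarts there.
 */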
25 static void tcf_action_goto_chain_exec(const struct tc_action *a,
26                                        struct tcf_result *res)
27 {
28         const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
29
30         res->goto_tp = rcu_dereference_bh(chain->filter_chain);
31 }
32
33 static void tcf_free_cookie_rcu(struct rcu_head *p)
34 {
35         struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
36
37         kfree(cookie->data);
38         kfree(cookie);
39 }
40
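/* Atomically swap in a new user cookie; the old one, if any, is freed only
 * after an RCU grace period so concurrent readers in the dump path stay safe.
 */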
41 static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
42                                   struct tc_cookie *new_cookie)
43 {
44         struct tc_cookie *old;
45
46         old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
47         if (old)
48                 call_rcu(&old->rcu, tcf_free_cookie_rcu);
49 }
50
51 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
52                              struct tcf_chain **newchain,
53                              struct netlink_ext_ack *extack)
54 {
55         int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
56         u32 chain_index;
57
58         if (!opcode)
59                 ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
60         else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
61                 ret = 0;
62         if (ret) {
63                 NL_SET_ERR_MSG(extack, "invalid control action");
64                 goto end;
65         }
66
67         if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
68                 chain_index = action & TC_ACT_EXT_VAL_MASK;
69                 if (!tp || !newchain) {
70                         ret = -EINVAL;
71                         NL_SET_ERR_MSG(extack,
72                                        "can't goto NULL proto/chain");
73                         goto end;
74                 }
75                 *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
76                 if (!*newchain) {
77                         ret = -ENOMEM;
78                         NL_SET_ERR_MSG(extack,
79                                        "can't allocate goto_chain");
80                 }
81         }
82 end:
83         return ret;
84 }
85 EXPORT_SYMBOL(tcf_action_check_ctrlact);
86
87 struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
88                                          struct tcf_chain *goto_chain)
89 {
90         a->tcfa_action = action;
91         rcu_swap_protected(a->goto_chain, goto_chain, 1);
92         return goto_chain;
93 }
94 EXPORT_SYMBOL(tcf_action_set_ctrlact);
95
96 /* XXX: For standalone actions, we don't need an RCU grace period either, because
97  * actions are always connected to filters and filters are already destroyed in
98  * RCU callbacks, so after an RCU grace period actions are already disconnected
99  * from filters and readers can no longer find us.
100  */
101 static void free_tcf(struct tc_action *p)
102 {
103         struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
104
105         free_percpu(p->cpu_bstats);
106         free_percpu(p->cpu_bstats_hw);
107         free_percpu(p->cpu_qstats);
108
109         tcf_set_action_cookie(&p->act_cookie, NULL);
110         if (chain)
111                 tcf_chain_put_by_act(chain);
112
113         kfree(p);
114 }
115
116 static void tcf_action_cleanup(struct tc_action *p)
117 {
118         if (p->ops->cleanup)
119                 p->ops->cleanup(p);
120
121         gen_kill_estimator(&p->tcfa_rate_est);
122         free_tcf(p);
123 }
124
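/* Drop one reference (and, if @bind is true, one bind reference) on @p.
 * Returns 1 if this was the last reference, in which case the action is
 * removed from the idr under idrinfo->lock and fully cleaned up, 0 otherwise.
 */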
125 static int __tcf_action_put(struct tc_action *p, bool bind)
126 {
127         struct tcf_idrinfo *idrinfo = p->idrinfo;
128
129         if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
130                 if (bind)
131                         atomic_dec(&p->tcfa_bindcnt);
132                 idr_remove(&idrinfo->action_idr, p->tcfa_index);
133                 mutex_unlock(&idrinfo->lock);
134
135                 tcf_action_cleanup(p);
136                 return 1;
137         }
138
139         if (bind)
140                 atomic_dec(&p->tcfa_bindcnt);
141
142         return 0;
143 }
144
145 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
146 {
147         int ret = 0;
148
149         /* Release with strict==1 and bind==0 is only called through the act API
150          * interface (classifiers always bind). The only case where an action with
151          * a positive reference count and a zero bind count can exist is when it
152          * was also created through the act API (unbinding the last classifier
153          * destroys an action that was created by a classifier). So the only case
154          * where the bind count can change after the initial check is when an
155          * unbound action is destroyed through the act API while a classifier
156          * concurrently binds to an action with the same id. This results either in
157          * creation of a new action (same behavior as before) or in reuse of the
158          * existing action, if the concurrent process increments the reference
159          * count before the action is deleted. Both scenarios are acceptable.
160          */
161         if (p) {
162                 if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
163                         return -EPERM;
164
165                 if (__tcf_action_put(p, bind))
166                         ret = ACT_P_DELETED;
167         }
168
169         return ret;
170 }
171 EXPORT_SYMBOL(__tcf_idr_release);
172
173 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
174 {
175         struct tc_cookie *act_cookie;
176         u32 cookie_len = 0;
177
178         rcu_read_lock();
179         act_cookie = rcu_dereference(act->act_cookie);
180
181         if (act_cookie)
182                 cookie_len = nla_total_size(act_cookie->len);
183         rcu_read_unlock();
184
185         return  nla_total_size(0) /* action number nested */
186                 + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
187                 + cookie_len /* TCA_ACT_COOKIE */
188                 + nla_total_size(0) /* TCA_ACT_STATS nested */
189                 /* TCA_STATS_BASIC */
190                 + nla_total_size_64bit(sizeof(struct gnet_stats_basic))
191                 /* TCA_STATS_QUEUE */
192                 + nla_total_size_64bit(sizeof(struct gnet_stats_queue))
193                 + nla_total_size(0) /* TCA_OPTIONS nested */
194                 + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
195 }
196
197 static size_t tcf_action_full_attrs_size(size_t sz)
198 {
199         return NLMSG_HDRLEN                     /* struct nlmsghdr */
200                 + sizeof(struct tcamsg)
201                 + nla_total_size(0)             /* TCA_ACT_TAB nested */
202                 + sz;
203 }
204
205 static size_t tcf_action_fill_size(const struct tc_action *act)
206 {
207         size_t sz = tcf_action_shared_attrs_size(act);
208
209         if (act->ops->get_fill_size)
210                 return act->ops->get_fill_size(act) + sz;
211         return sz;
212 }
213
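/* Walk the per-netns action idr for an RTM_GETACTION dump. cb->args[0] holds
 * the resume index, cb->args[2] the dump flags and cb->args[3] an optional
 * cutoff in jiffies (only actions used since then are dumped). At most
 * TCA_ACT_MAX_PRIO actions are emitted per pass unless
 * TCA_FLAG_LARGE_DUMP_ON is set.
 */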
214 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
215                            struct netlink_callback *cb)
216 {
217         int err = 0, index = -1, s_i = 0, n_i = 0;
218         u32 act_flags = cb->args[2];
219         unsigned long jiffy_since = cb->args[3];
220         struct nlattr *nest;
221         struct idr *idr = &idrinfo->action_idr;
222         struct tc_action *p;
223         unsigned long id = 1;
224
225         mutex_lock(&idrinfo->lock);
226
227         s_i = cb->args[0];
228
229         idr_for_each_entry_ul(idr, p, id) {
230                 index++;
231                 if (index < s_i)
232                         continue;
233
234                 if (jiffy_since &&
235                     time_after(jiffy_since,
236                                (unsigned long)p->tcfa_tm.lastuse))
237                         continue;
238
239                 nest = nla_nest_start_noflag(skb, n_i);
240                 if (!nest) {
241                         index--;
242                         goto nla_put_failure;
243                 }
244                 err = tcf_action_dump_1(skb, p, 0, 0);
245                 if (err < 0) {
246                         index--;
247                         nlmsg_trim(skb, nest);
248                         goto done;
249                 }
250                 nla_nest_end(skb, nest);
251                 n_i++;
252                 if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
253                     n_i >= TCA_ACT_MAX_PRIO)
254                         goto done;
255         }
256 done:
257         if (index >= 0)
258                 cb->args[0] = index + 1;
259
260         mutex_unlock(&idrinfo->lock);
261         if (n_i) {
262                 if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
263                         cb->args[1] = n_i;
264         }
265         return n_i;
266
267 nla_put_failure:
268         nla_nest_cancel(skb, nest);
269         goto done;
270 }
271
272 static int tcf_idr_release_unsafe(struct tc_action *p)
273 {
274         if (atomic_read(&p->tcfa_bindcnt) > 0)
275                 return -EPERM;
276
277         if (refcount_dec_and_test(&p->tcfa_refcnt)) {
278                 idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
279                 tcf_action_cleanup(p);
280                 return ACT_P_DELETED;
281         }
282
283         return 0;
284 }
285
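/* Flush helper for RTM_DELACTION: release every action of this kind that is
 * not bound to a classifier and report the number of deleted entries via
 * TCA_FCNT. Hitting a bound action aborts the flush with an error.
 */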
286 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
287                           const struct tc_action_ops *ops)
288 {
289         struct nlattr *nest;
290         int n_i = 0;
291         int ret = -EINVAL;
292         struct idr *idr = &idrinfo->action_idr;
293         struct tc_action *p;
294         unsigned long id = 1;
295
296         nest = nla_nest_start_noflag(skb, 0);
297         if (nest == NULL)
298                 goto nla_put_failure;
299         if (nla_put_string(skb, TCA_KIND, ops->kind))
300                 goto nla_put_failure;
301
302         mutex_lock(&idrinfo->lock);
303         idr_for_each_entry_ul(idr, p, id) {
304                 ret = tcf_idr_release_unsafe(p);
305                 if (ret == ACT_P_DELETED) {
306                         module_put(ops->owner);
307                         n_i++;
308                 } else if (ret < 0) {
309                         mutex_unlock(&idrinfo->lock);
310                         goto nla_put_failure;
311                 }
312         }
313         mutex_unlock(&idrinfo->lock);
314
315         if (nla_put_u32(skb, TCA_FCNT, n_i))
316                 goto nla_put_failure;
317         nla_nest_end(skb, nest);
318
319         return n_i;
320 nla_put_failure:
321         nla_nest_cancel(skb, nest);
322         return ret;
323 }
324
325 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
326                        struct netlink_callback *cb, int type,
327                        const struct tc_action_ops *ops,
328                        struct netlink_ext_ack *extack)
329 {
330         struct tcf_idrinfo *idrinfo = tn->idrinfo;
331
332         if (type == RTM_DELACTION) {
333                 return tcf_del_walker(idrinfo, skb, ops);
334         } else if (type == RTM_GETACTION) {
335                 return tcf_dump_walker(idrinfo, skb, cb);
336         } else {
337                 WARN(1, "tcf_generic_walker: unknown command %d\n", type);
338                 NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
339                 return -EINVAL;
340         }
341 }
342 EXPORT_SYMBOL(tcf_generic_walker);
343
344 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
345 {
346         struct tcf_idrinfo *idrinfo = tn->idrinfo;
347         struct tc_action *p;
348
349         mutex_lock(&idrinfo->lock);
350         p = idr_find(&idrinfo->action_idr, index);
351         if (IS_ERR(p))
352                 p = NULL;
353         else if (p)
354                 refcount_inc(&p->tcfa_refcnt);
355         mutex_unlock(&idrinfo->lock);
356
357         if (p) {
358                 *a = p;
359                 return true;
360         }
361         return false;
362 }
363 EXPORT_SYMBOL(tcf_idr_search);
364
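/* Delete the action at @index if it exists and is not bound by any
 * classifier. Returns -ENOENT if no such action exists and -EPERM if it is
 * still bound.
 */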
365 static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
366 {
367         struct tc_action *p;
368         int ret = 0;
369
370         mutex_lock(&idrinfo->lock);
371         p = idr_find(&idrinfo->action_idr, index);
372         if (!p) {
373                 mutex_unlock(&idrinfo->lock);
374                 return -ENOENT;
375         }
376
377         if (!atomic_read(&p->tcfa_bindcnt)) {
378                 if (refcount_dec_and_test(&p->tcfa_refcnt)) {
379                         struct module *owner = p->ops->owner;
380
381                         WARN_ON(p != idr_remove(&idrinfo->action_idr,
382                                                 p->tcfa_index));
383                         mutex_unlock(&idrinfo->lock);
384
385                         tcf_action_cleanup(p);
386                         module_put(owner);
387                         return 0;
388                 }
389                 ret = 0;
390         } else {
391                 ret = -EPERM;
392         }
393
394         mutex_unlock(&idrinfo->lock);
395         return ret;
396 }
397
398 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
399                    struct tc_action **a, const struct tc_action_ops *ops,
400                    int bind, bool cpustats)
401 {
402         struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
403         struct tcf_idrinfo *idrinfo = tn->idrinfo;
404         int err = -ENOMEM;
405
406         if (unlikely(!p))
407                 return -ENOMEM;
408         refcount_set(&p->tcfa_refcnt, 1);
409         if (bind)
410                 atomic_set(&p->tcfa_bindcnt, 1);
411
412         if (cpustats) {
413                 p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
414                 if (!p->cpu_bstats)
415                         goto err1;
416                 p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
417                 if (!p->cpu_bstats_hw)
418                         goto err2;
419                 p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
420                 if (!p->cpu_qstats)
421                         goto err3;
422         }
423         spin_lock_init(&p->tcfa_lock);
424         p->tcfa_index = index;
425         p->tcfa_tm.install = jiffies;
426         p->tcfa_tm.lastuse = jiffies;
427         p->tcfa_tm.firstuse = 0;
428         if (est) {
429                 err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
430                                         &p->tcfa_rate_est,
431                                         &p->tcfa_lock, NULL, est);
432                 if (err)
433                         goto err4;
434         }
435
436         p->idrinfo = idrinfo;
437         p->ops = ops;
438         *a = p;
439         return 0;
440 err4:
441         free_percpu(p->cpu_qstats);
442 err3:
443         free_percpu(p->cpu_bstats_hw);
444 err2:
445         free_percpu(p->cpu_bstats);
446 err1:
447         kfree(p);
448         return err;
449 }
450 EXPORT_SYMBOL(tcf_idr_create);
451
452 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
453 {
454         struct tcf_idrinfo *idrinfo = tn->idrinfo;
455
456         mutex_lock(&idrinfo->lock);
457         /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
458         WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
459         mutex_unlock(&idrinfo->lock);
460 }
461 EXPORT_SYMBOL(tcf_idr_insert);
462
463 /* Cleanup idr index that was allocated but not initialized. */
464
465 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
466 {
467         struct tcf_idrinfo *idrinfo = tn->idrinfo;
468
469         mutex_lock(&idrinfo->lock);
470         /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
471         WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
472         mutex_unlock(&idrinfo->lock);
473 }
474 EXPORT_SYMBOL(tcf_idr_cleanup);
475
476 /* Check if an action with the specified index exists. If the action is found,
477  * increment its reference and bind counters and return 1. Otherwise insert a
478  * temporary error pointer (to prevent concurrent users from inserting actions
479  * with the same index) and return 0.
480  */
481
482 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
483                         struct tc_action **a, int bind)
484 {
485         struct tcf_idrinfo *idrinfo = tn->idrinfo;
486         struct tc_action *p;
487         int ret;
488
489 again:
490         mutex_lock(&idrinfo->lock);
491         if (*index) {
492                 p = idr_find(&idrinfo->action_idr, *index);
493                 if (IS_ERR(p)) {
494                         /* This means that another process allocated
495                          * index but did not assign the pointer yet.
496                          */
497                         mutex_unlock(&idrinfo->lock);
498                         goto again;
499                 }
500
501                 if (p) {
502                         refcount_inc(&p->tcfa_refcnt);
503                         if (bind)
504                                 atomic_inc(&p->tcfa_bindcnt);
505                         *a = p;
506                         ret = 1;
507                 } else {
508                         *a = NULL;
509                         ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
510                                             *index, GFP_KERNEL);
511                         if (!ret)
512                                 idr_replace(&idrinfo->action_idr,
513                                             ERR_PTR(-EBUSY), *index);
514                 }
515         } else {
516                 *index = 1;
517                 *a = NULL;
518                 ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
519                                     UINT_MAX, GFP_KERNEL);
520                 if (!ret)
521                         idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
522                                     *index);
523         }
524         mutex_unlock(&idrinfo->lock);
525         return ret;
526 }
527 EXPORT_SYMBOL(tcf_idr_check_alloc);
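
/* A typical action ->init() implementation (a sketch, not code from this
 * file) pairs the helpers above roughly like this:
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err < 0)
 *		return err;
 *	if (!err) {
 *		err = tcf_idr_create(tn, index, est, a, ops, bind, false);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);
 *			return err;
 *		}
 *		ret = ACT_P_CREATED;
 *	}
 *	... parse parameters and set up the action ...
 *	if (ret == ACT_P_CREATED)
 *		tcf_idr_insert(tn, *a);
 */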
528
529 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
530                          struct tcf_idrinfo *idrinfo)
531 {
532         struct idr *idr = &idrinfo->action_idr;
533         struct tc_action *p;
534         int ret;
535         unsigned long id = 1;
536
537         idr_for_each_entry_ul(idr, p, id) {
538                 ret = __tcf_idr_release(p, false, true);
539                 if (ret == ACT_P_DELETED)
540                         module_put(ops->owner);
541                 else if (ret < 0)
542                         return;
543         }
544         idr_destroy(&idrinfo->action_idr);
545 }
546 EXPORT_SYMBOL(tcf_idrinfo_destroy);
547
548 static LIST_HEAD(act_base);
549 static DEFINE_RWLOCK(act_mod_lock);
550
551 int tcf_register_action(struct tc_action_ops *act,
552                         struct pernet_operations *ops)
553 {
554         struct tc_action_ops *a;
555         int ret;
556
557         if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
558                 return -EINVAL;
559
560         /* We have to register pernet ops before making the action ops visible,
561          * otherwise tcf_action_init_1() could get a partially initialized
562          * netns.
563          */
564         ret = register_pernet_subsys(ops);
565         if (ret)
566                 return ret;
567
568         write_lock(&act_mod_lock);
569         list_for_each_entry(a, &act_base, head) {
570                 if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
571                         write_unlock(&act_mod_lock);
572                         unregister_pernet_subsys(ops);
573                         return -EEXIST;
574                 }
575         }
576         list_add_tail(&act->head, &act_base);
577         write_unlock(&act_mod_lock);
578
579         return 0;
580 }
581 EXPORT_SYMBOL(tcf_register_action);
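
/* Action modules typically register themselves from module_init(); a minimal
 * sketch with illustrative identifiers:
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.id	= TCA_ID_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	return tcf_register_action(&act_foo_ops, &foo_net_ops);
 */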
582
583 int tcf_unregister_action(struct tc_action_ops *act,
584                           struct pernet_operations *ops)
585 {
586         struct tc_action_ops *a;
587         int err = -ENOENT;
588
589         write_lock(&act_mod_lock);
590         list_for_each_entry(a, &act_base, head) {
591                 if (a == act) {
592                         list_del(&act->head);
593                         err = 0;
594                         break;
595                 }
596         }
597         write_unlock(&act_mod_lock);
598         if (!err)
599                 unregister_pernet_subsys(ops);
600         return err;
601 }
602 EXPORT_SYMBOL(tcf_unregister_action);
603
604 /* lookup by name */
605 static struct tc_action_ops *tc_lookup_action_n(char *kind)
606 {
607         struct tc_action_ops *a, *res = NULL;
608
609         if (kind) {
610                 read_lock(&act_mod_lock);
611                 list_for_each_entry(a, &act_base, head) {
612                         if (strcmp(kind, a->kind) == 0) {
613                                 if (try_module_get(a->owner))
614                                         res = a;
615                                 break;
616                         }
617                 }
618                 read_unlock(&act_mod_lock);
619         }
620         return res;
621 }
622
623 /* lookup by nlattr */
624 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
625 {
626         struct tc_action_ops *a, *res = NULL;
627
628         if (kind) {
629                 read_lock(&act_mod_lock);
630                 list_for_each_entry(a, &act_base, head) {
631                         if (nla_strcmp(kind, a->kind) == 0) {
632                                 if (try_module_get(a->owner))
633                                         res = a;
634                                 break;
635                         }
636                 }
637                 read_unlock(&act_mod_lock);
638         }
639         return res;
640 }
641
642 /* TCA_ACT_MAX_PRIO is 32, so there can be at most 32 actions per filter */
643 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
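/* Execute the action pipeline on @skb: TC_ACT_PIPE continues to the next
 * action, TC_ACT_REPEAT re-runs the current one, TC_ACT_JUMP skips the
 * encoded number of actions (bounded by a TTL to catch faulty graphs) and
 * TC_ACT_GOTO_CHAIN redirects classification to another chain. Any other
 * verdict stops the pipeline and is returned to the caller.
 */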
644 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
645                     int nr_actions, struct tcf_result *res)
646 {
647         u32 jmp_prgcnt = 0;
648         u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
649         int i;
650         int ret = TC_ACT_OK;
651
652         if (skb_skip_tc_classify(skb))
653                 return TC_ACT_OK;
654
655 restart_act_graph:
656         for (i = 0; i < nr_actions; i++) {
657                 const struct tc_action *a = actions[i];
658
659                 if (jmp_prgcnt > 0) {
660                         jmp_prgcnt -= 1;
661                         continue;
662                 }
663 repeat:
664                 ret = a->ops->act(skb, a, res);
665                 if (ret == TC_ACT_REPEAT)
666                         goto repeat;    /* we need a ttl - JHS */
667
668                 if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
669                         jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
670                         if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
671                                 /* faulty opcode, stop pipeline */
672                                 return TC_ACT_OK;
673                         } else {
674                                 jmp_ttl -= 1;
675                                 if (jmp_ttl > 0)
676                                         goto restart_act_graph;
677                                 else /* faulty graph, stop pipeline */
678                                         return TC_ACT_OK;
679                         }
680                 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
681                         if (unlikely(!rcu_access_pointer(a->goto_chain))) {
682                                 net_warn_ratelimited("can't go to NULL chain!\n");
683                                 return TC_ACT_SHOT;
684                         }
685                         tcf_action_goto_chain_exec(a, res);
686                 }
687
688                 if (ret != TC_ACT_PIPE)
689                         break;
690         }
691
692         return ret;
693 }
694 EXPORT_SYMBOL(tcf_action_exec);
695
696 int tcf_action_destroy(struct tc_action *actions[], int bind)
697 {
698         const struct tc_action_ops *ops;
699         struct tc_action *a;
700         int ret = 0, i;
701
702         for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
703                 a = actions[i];
704                 actions[i] = NULL;
705                 ops = a->ops;
706                 ret = __tcf_idr_release(a, bind, true);
707                 if (ret == ACT_P_DELETED)
708                         module_put(ops->owner);
709                 else if (ret < 0)
710                         return ret;
711         }
712         return ret;
713 }
714
715 static int tcf_action_destroy_1(struct tc_action *a, int bind)
716 {
717         struct tc_action *actions[] = { a, NULL };
718
719         return tcf_action_destroy(actions, bind);
720 }
721
722 static int tcf_action_put(struct tc_action *p)
723 {
724         return __tcf_action_put(p, false);
725 }
726
727 /* Put all actions in this array, skipping NULL entries. */
728 static void tcf_action_put_many(struct tc_action *actions[])
729 {
730         int i;
731
732         for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
733                 struct tc_action *a = actions[i];
734                 const struct tc_action_ops *ops;
735
736                 if (!a)
737                         continue;
738                 ops = a->ops;
739                 if (tcf_action_put(a))
740                         module_put(ops->owner);
741         }
742 }
743
744 int
745 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
746 {
747         return a->ops->dump(skb, a, bind, ref);
748 }
749
750 int
751 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
752 {
753         int err = -EINVAL;
754         unsigned char *b = skb_tail_pointer(skb);
755         struct nlattr *nest;
756         struct tc_cookie *cookie;
757
758         if (nla_put_string(skb, TCA_KIND, a->ops->kind))
759                 goto nla_put_failure;
760         if (tcf_action_copy_stats(skb, a, 0))
761                 goto nla_put_failure;
762
763         rcu_read_lock();
764         cookie = rcu_dereference(a->act_cookie);
765         if (cookie) {
766                 if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
767                         rcu_read_unlock();
768                         goto nla_put_failure;
769                 }
770         }
771         rcu_read_unlock();
772
773         nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
774         if (nest == NULL)
775                 goto nla_put_failure;
776         err = tcf_action_dump_old(skb, a, bind, ref);
777         if (err > 0) {
778                 nla_nest_end(skb, nest);
779                 return err;
780         }
781
782 nla_put_failure:
783         nlmsg_trim(skb, b);
784         return -1;
785 }
786 EXPORT_SYMBOL(tcf_action_dump_1);
787
788 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
789                     int bind, int ref)
790 {
791         struct tc_action *a;
792         int err = -EINVAL, i;
793         struct nlattr *nest;
794
795         for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
796                 a = actions[i];
797                 nest = nla_nest_start_noflag(skb, i + 1);
798                 if (nest == NULL)
799                         goto nla_put_failure;
800                 err = tcf_action_dump_1(skb, a, bind, ref);
801                 if (err < 0)
802                         goto errout;
803                 nla_nest_end(skb, nest);
804         }
805
806         return 0;
807
808 nla_put_failure:
809         err = -EINVAL;
810 errout:
811         nla_nest_cancel(skb, nest);
812         return err;
813 }
814
815 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
816 {
817         struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
818         if (!c)
819                 return NULL;
820
821         c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
822         if (!c->data) {
823                 kfree(c);
824                 return NULL;
825         }
826         c->len = nla_len(tb[TCA_ACT_COOKIE]);
827
828         return c;
829 }
830
831 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
832                                     struct nlattr *nla, struct nlattr *est,
833                                     char *name, int ovr, int bind,
834                                     bool rtnl_held,
835                                     struct netlink_ext_ack *extack)
836 {
837         struct tc_action *a;
838         struct tc_action_ops *a_o;
839         struct tc_cookie *cookie = NULL;
840         char act_name[IFNAMSIZ];
841         struct nlattr *tb[TCA_ACT_MAX + 1];
842         struct nlattr *kind;
843         int err;
844
845         if (name == NULL) {
846                 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL,
847                                                   extack);
848                 if (err < 0)
849                         goto err_out;
850                 err = -EINVAL;
851                 kind = tb[TCA_ACT_KIND];
852                 if (!kind) {
853                         NL_SET_ERR_MSG(extack, "TC action kind must be specified");
854                         goto err_out;
855                 }
856                 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
857                         NL_SET_ERR_MSG(extack, "TC action name too long");
858                         goto err_out;
859                 }
860                 if (tb[TCA_ACT_COOKIE]) {
861                         int cklen = nla_len(tb[TCA_ACT_COOKIE]);
862
863                         if (cklen > TC_COOKIE_MAX_SIZE) {
864                                 NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
865                                 goto err_out;
866                         }
867
868                         cookie = nla_memdup_cookie(tb);
869                         if (!cookie) {
870                                 NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
871                                 err = -ENOMEM;
872                                 goto err_out;
873                         }
874                 }
875         } else {
876                 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
877                         NL_SET_ERR_MSG(extack, "TC action name too long");
878                         err = -EINVAL;
879                         goto err_out;
880                 }
881         }
882
883         a_o = tc_lookup_action_n(act_name);
884         if (a_o == NULL) {
885 #ifdef CONFIG_MODULES
886                 if (rtnl_held)
887                         rtnl_unlock();
888                 request_module("act_%s", act_name);
889                 if (rtnl_held)
890                         rtnl_lock();
891
892                 a_o = tc_lookup_action_n(act_name);
893
894                 /* We dropped the RTNL semaphore in order to
895                  * perform the module load.  So, even if we
896                  * succeeded in loading the module we have to
897                  * tell the caller to replay the request.  We
898                  * indicate this using -EAGAIN.
899                  */
900                 if (a_o != NULL) {
901                         err = -EAGAIN;
902                         goto err_mod;
903                 }
904 #endif
905                 NL_SET_ERR_MSG(extack, "Failed to load TC action module");
906                 err = -ENOENT;
907                 goto err_out;
908         }
909
910         /* backward compatibility for policer */
911         if (name == NULL)
912                 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
913                                 rtnl_held, tp, extack);
914         else
915                 err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
916                                 tp, extack);
917         if (err < 0)
918                 goto err_mod;
919
920         if (!name && tb[TCA_ACT_COOKIE])
921                 tcf_set_action_cookie(&a->act_cookie, cookie);
922
923         /* The module count goes up only when a brand new policy is created;
924          * if it already exists and is only bound to in a_o->init(), then
925          * ACT_P_CREATED is not returned (zero is).
926          */
927         if (err != ACT_P_CREATED)
928                 module_put(a_o->owner);
929
930         if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
931             !rcu_access_pointer(a->goto_chain)) {
932                 tcf_action_destroy_1(a, bind);
933                 NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
934                 return ERR_PTR(-EINVAL);
935         }
936
937         return a;
938
939 err_mod:
940         module_put(a_o->owner);
941 err_out:
942         if (cookie) {
943                 kfree(cookie->data);
944                 kfree(cookie);
945         }
946         return ERR_PTR(err);
947 }
948
949 /* Returns the number of initialized actions or a negative error. */
950
951 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
952                     struct nlattr *est, char *name, int ovr, int bind,
953                     struct tc_action *actions[], size_t *attr_size,
954                     bool rtnl_held, struct netlink_ext_ack *extack)
955 {
956         struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
957         struct tc_action *act;
958         size_t sz = 0;
959         int err;
960         int i;
961
962         err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
963                                           extack);
964         if (err < 0)
965                 return err;
966
967         for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
968                 act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
969                                         rtnl_held, extack);
970                 if (IS_ERR(act)) {
971                         err = PTR_ERR(act);
972                         goto err;
973                 }
974                 act->order = i;
975                 sz += tcf_action_fill_size(act);
976                 /* Start from index 0 */
977                 actions[i - 1] = act;
978         }
979
980         *attr_size = tcf_action_full_attrs_size(sz);
981         return i - 1;
982
983 err:
984         tcf_action_destroy(actions, bind);
985         return err;
986 }
987
988 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
989                           int compat_mode)
990 {
991         int err = 0;
992         struct gnet_dump d;
993
994         if (p == NULL)
995                 goto errout;
996
997         /* compat_mode being true specifies a call that is supposed
998          * to add additional backward compatibility statistic TLVs.
999          */
1000         if (compat_mode) {
1001                 if (p->type == TCA_OLD_COMPAT)
1002                         err = gnet_stats_start_copy_compat(skb, 0,
1003                                                            TCA_STATS,
1004                                                            TCA_XSTATS,
1005                                                            &p->tcfa_lock, &d,
1006                                                            TCA_PAD);
1007                 else
1008                         return 0;
1009         } else
1010                 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1011                                             &p->tcfa_lock, &d, TCA_ACT_PAD);
1012
1013         if (err < 0)
1014                 goto errout;
1015
1016         if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
1017             gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
1018                                      &p->tcfa_bstats_hw) < 0 ||
1019             gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1020             gnet_stats_copy_queue(&d, p->cpu_qstats,
1021                                   &p->tcfa_qstats,
1022                                   p->tcfa_qstats.qlen) < 0)
1023                 goto errout;
1024
1025         if (gnet_stats_finish_copy(&d) < 0)
1026                 goto errout;
1027
1028         return 0;
1029
1030 errout:
1031         return -1;
1032 }
1033
1034 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1035                         u32 portid, u32 seq, u16 flags, int event, int bind,
1036                         int ref)
1037 {
1038         struct tcamsg *t;
1039         struct nlmsghdr *nlh;
1040         unsigned char *b = skb_tail_pointer(skb);
1041         struct nlattr *nest;
1042
1043         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1044         if (!nlh)
1045                 goto out_nlmsg_trim;
1046         t = nlmsg_data(nlh);
1047         t->tca_family = AF_UNSPEC;
1048         t->tca__pad1 = 0;
1049         t->tca__pad2 = 0;
1050
1051         nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1052         if (!nest)
1053                 goto out_nlmsg_trim;
1054
1055         if (tcf_action_dump(skb, actions, bind, ref) < 0)
1056                 goto out_nlmsg_trim;
1057
1058         nla_nest_end(skb, nest);
1059
1060         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1061         return skb->len;
1062
1063 out_nlmsg_trim:
1064         nlmsg_trim(skb, b);
1065         return -1;
1066 }
1067
1068 static int
1069 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1070                struct tc_action *actions[], int event,
1071                struct netlink_ext_ack *extack)
1072 {
1073         struct sk_buff *skb;
1074
1075         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1076         if (!skb)
1077                 return -ENOBUFS;
1078         if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1079                          0, 1) <= 0) {
1080                 NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1081                 kfree_skb(skb);
1082                 return -EINVAL;
1083         }
1084
1085         return rtnl_unicast(skb, net, portid);
1086 }
1087
1088 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1089                                           struct nlmsghdr *n, u32 portid,
1090                                           struct netlink_ext_ack *extack)
1091 {
1092         struct nlattr *tb[TCA_ACT_MAX + 1];
1093         const struct tc_action_ops *ops;
1094         struct tc_action *a;
1095         int index;
1096         int err;
1097
1098         err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
1099         if (err < 0)
1100                 goto err_out;
1101
1102         err = -EINVAL;
1103         if (tb[TCA_ACT_INDEX] == NULL ||
1104             nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1105                 NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1106                 goto err_out;
1107         }
1108         index = nla_get_u32(tb[TCA_ACT_INDEX]);
1109
1110         err = -EINVAL;
1111         ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1112         if (!ops) { /* could happen in batch of actions */
1113                 NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1114                 goto err_out;
1115         }
1116         err = -ENOENT;
1117         if (ops->lookup(net, &a, index) == 0) {
1118                 NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1119                 goto err_mod;
1120         }
1121
1122         module_put(ops->owner);
1123         return a;
1124
1125 err_mod:
1126         module_put(ops->owner);
1127 err_out:
1128         return ERR_PTR(err);
1129 }
1130
1131 static int tca_action_flush(struct net *net, struct nlattr *nla,
1132                             struct nlmsghdr *n, u32 portid,
1133                             struct netlink_ext_ack *extack)
1134 {
1135         struct sk_buff *skb;
1136         unsigned char *b;
1137         struct nlmsghdr *nlh;
1138         struct tcamsg *t;
1139         struct netlink_callback dcb;
1140         struct nlattr *nest;
1141         struct nlattr *tb[TCA_ACT_MAX + 1];
1142         const struct tc_action_ops *ops;
1143         struct nlattr *kind;
1144         int err = -ENOMEM;
1145
1146         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1147         if (!skb)
1148                 return err;
1149
1150         b = skb_tail_pointer(skb);
1151
1152         err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
1153         if (err < 0)
1154                 goto err_out;
1155
1156         err = -EINVAL;
1157         kind = tb[TCA_ACT_KIND];
1158         ops = tc_lookup_action(kind);
1159         if (!ops) { /* someone is trying to flush an unknown action */
1160                 NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1161                 goto err_out;
1162         }
1163
1164         nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1165                         sizeof(*t), 0);
1166         if (!nlh) {
1167                 NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1168                 goto out_module_put;
1169         }
1170         t = nlmsg_data(nlh);
1171         t->tca_family = AF_UNSPEC;
1172         t->tca__pad1 = 0;
1173         t->tca__pad2 = 0;
1174
1175         nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1176         if (!nest) {
1177                 NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1178                 goto out_module_put;
1179         }
1180
1181         err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
1182         if (err <= 0) {
1183                 nla_nest_cancel(skb, nest);
1184                 goto out_module_put;
1185         }
1186
1187         nla_nest_end(skb, nest);
1188
1189         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1190         nlh->nlmsg_flags |= NLM_F_ROOT;
1191         module_put(ops->owner);
1192         err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1193                              n->nlmsg_flags & NLM_F_ECHO);
1194         if (err > 0)
1195                 return 0;
1196         if (err < 0)
1197                 NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1198
1199         return err;
1200
1201 out_module_put:
1202         module_put(ops->owner);
1203 err_out:
1204         kfree_skb(skb);
1205         return err;
1206 }
1207
1208 static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1209 {
1210         int i;
1211
1212         for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1213                 struct tc_action *a = actions[i];
1214                 const struct tc_action_ops *ops = a->ops;
1215                 /* Actions can be deleted concurrently so we must save their
1216                  * type and id to search again after reference is released.
1217                  */
1218                 struct tcf_idrinfo *idrinfo = a->idrinfo;
1219                 u32 act_index = a->tcfa_index;
1220
1221                 actions[i] = NULL;
1222                 if (tcf_action_put(a)) {
1223                         /* last reference, action was deleted concurrently */
1224                         module_put(ops->owner);
1225                 } else  {
1226                         int ret;
1227
1228                         /* now do the delete */
1229                         ret = tcf_idr_delete_index(idrinfo, act_index);
1230                         if (ret < 0)
1231                                 return ret;
1232                 }
1233         }
1234         return 0;
1235 }
1236
1237 static int
1238 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1239                u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1240 {
1241         int ret;
1242         struct sk_buff *skb;
1243
1244         skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1245                         GFP_KERNEL);
1246         if (!skb)
1247                 return -ENOBUFS;
1248
1249         if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1250                          0, 2) <= 0) {
1251                 NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1252                 kfree_skb(skb);
1253                 return -EINVAL;
1254         }
1255
1256         /* now do the delete */
1257         ret = tcf_action_delete(net, actions);
1258         if (ret < 0) {
1259                 NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1260                 kfree_skb(skb);
1261                 return ret;
1262         }
1263
1264         ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1265                              n->nlmsg_flags & NLM_F_ECHO);
1266         if (ret > 0)
1267                 return 0;
1268         return ret;
1269 }
1270
1271 static int
1272 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1273               u32 portid, int event, struct netlink_ext_ack *extack)
1274 {
1275         int i, ret;
1276         struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1277         struct tc_action *act;
1278         size_t attr_size = 0;
1279         struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1280
1281         ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1282                                           extack);
1283         if (ret < 0)
1284                 return ret;
1285
1286         if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1287                 if (tb[1])
1288                         return tca_action_flush(net, tb[1], n, portid, extack);
1289
1290                 NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1291                 return -EINVAL;
1292         }
1293
1294         for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1295                 act = tcf_action_get_1(net, tb[i], n, portid, extack);
1296                 if (IS_ERR(act)) {
1297                         ret = PTR_ERR(act);
1298                         goto err;
1299                 }
1300                 attr_size += tcf_action_fill_size(act);
1301                 actions[i - 1] = act;
1302         }
1303
1304         attr_size = tcf_action_full_attrs_size(attr_size);
1305
1306         if (event == RTM_GETACTION)
1307                 ret = tcf_get_notify(net, portid, n, actions, event, extack);
1308         else { /* delete */
1309                 ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1310                 if (ret)
1311                         goto err;
1312                 return 0;
1313         }
1314 err:
1315         tcf_action_put_many(actions);
1316         return ret;
1317 }
1318
1319 static int
1320 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1321                u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1322 {
1323         struct sk_buff *skb;
1324         int err = 0;
1325
1326         skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1327                         GFP_KERNEL);
1328         if (!skb)
1329                 return -ENOBUFS;
1330
1331         if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
1332                          RTM_NEWACTION, 0, 0) <= 0) {
1333                 NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1334                 kfree_skb(skb);
1335                 return -EINVAL;
1336         }
1337
1338         err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1339                              n->nlmsg_flags & NLM_F_ECHO);
1340         if (err > 0)
1341                 err = 0;
1342         return err;
1343 }
1344
1345 static int tcf_action_add(struct net *net, struct nlattr *nla,
1346                           struct nlmsghdr *n, u32 portid, int ovr,
1347                           struct netlink_ext_ack *extack)
1348 {
1349         size_t attr_size = 0;
1350         int ret = 0;
1351         struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1352
1353         ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
1354                               &attr_size, true, extack);
1355         if (ret < 0)
1356                 return ret;
1357         ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
1358         if (ovr)
1359                 tcf_action_put_many(actions);
1360
1361         return ret;
1362 }
1363
1364 static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
1365 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
1366         [TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
1367                              .validation_data = &tcaa_root_flags_allowed },
1368         [TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
1369 };
1370
1371 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
1372                          struct netlink_ext_ack *extack)
1373 {
1374         struct net *net = sock_net(skb->sk);
1375         struct nlattr *tca[TCA_ROOT_MAX + 1];
1376         u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1377         int ret = 0, ovr = 0;
1378
1379         if ((n->nlmsg_type != RTM_GETACTION) &&
1380             !netlink_capable(skb, CAP_NET_ADMIN))
1381                 return -EPERM;
1382
1383         ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
1384                                      TCA_ROOT_MAX, NULL, extack);
1385         if (ret < 0)
1386                 return ret;
1387
1388         if (tca[TCA_ACT_TAB] == NULL) {
1389                 NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
1390                 return -EINVAL;
1391         }
1392
1393         /* n->nlmsg_flags & NLM_F_CREATE */
1394         switch (n->nlmsg_type) {
1395         case RTM_NEWACTION:
1396                 /* We are going to assume that all other flags
1397                  * imply "create only if it doesn't exist".
1398                  * Note that CREATE | EXCL implies that, but since
1399                  * we want to avoid ambiguity (e.g. when flags is
1400                  * zero) we just set this.
1401                  */
1402                 if (n->nlmsg_flags & NLM_F_REPLACE)
1403                         ovr = 1;
1404 replay:
1405                 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
1406                                      extack);
1407                 if (ret == -EAGAIN)
1408                         goto replay;
1409                 break;
1410         case RTM_DELACTION:
1411                 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1412                                     portid, RTM_DELACTION, extack);
1413                 break;
1414         case RTM_GETACTION:
1415                 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1416                                     portid, RTM_GETACTION, extack);
1417                 break;
1418         default:
1419                 BUG();
1420         }
1421
1422         return ret;
1423 }
1424
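/* Extract the TCA_ACT_KIND attribute of the first action in a dump request so
 * that the matching action ops can be looked up before walking the idr.
 */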
1425 static struct nlattr *find_dump_kind(struct nlattr **nla)
1426 {
1427         struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
1428         struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1429         struct nlattr *kind;
1430
1431         tb1 = nla[TCA_ACT_TAB];
1432         if (tb1 == NULL)
1433                 return NULL;
1434
1435         if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
1436                 return NULL;
1437
1438         if (tb[1] == NULL)
1439                 return NULL;
1440         if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
1441                 return NULL;
1442         kind = tb2[TCA_ACT_KIND];
1443
1444         return kind;
1445 }
1446
1447 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1448 {
1449         struct net *net = sock_net(skb->sk);
1450         struct nlmsghdr *nlh;
1451         unsigned char *b = skb_tail_pointer(skb);
1452         struct nlattr *nest;
1453         struct tc_action_ops *a_o;
1454         int ret = 0;
1455         struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1456         struct nlattr *tb[TCA_ROOT_MAX + 1];
1457         struct nlattr *count_attr = NULL;
1458         unsigned long jiffy_since = 0;
1459         struct nlattr *kind = NULL;
1460         struct nla_bitfield32 bf;
1461         u32 msecs_since = 0;
1462         u32 act_count = 0;
1463
1464         ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
1465                                      TCA_ROOT_MAX, tcaa_policy, cb->extack);
1466         if (ret < 0)
1467                 return ret;
1468
1469         kind = find_dump_kind(tb);
1470         if (kind == NULL) {
1471                 pr_info("tc_dump_action: action bad kind\n");
1472                 return 0;
1473         }
1474
1475         a_o = tc_lookup_action(kind);
1476         if (a_o == NULL)
1477                 return 0;
1478
1479         cb->args[2] = 0;
1480         if (tb[TCA_ROOT_FLAGS]) {
1481                 bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
1482                 cb->args[2] = bf.value;
1483         }
1484
1485         if (tb[TCA_ROOT_TIME_DELTA]) {
1486                 msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
1487         }
1488
1489         nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1490                         cb->nlh->nlmsg_type, sizeof(*t), 0);
1491         if (!nlh)
1492                 goto out_module_put;
1493
1494         if (msecs_since)
1495                 jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
1496
1497         t = nlmsg_data(nlh);
1498         t->tca_family = AF_UNSPEC;
1499         t->tca__pad1 = 0;
1500         t->tca__pad2 = 0;
1501         cb->args[3] = jiffy_since;
1502         count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
1503         if (!count_attr)
1504                 goto out_module_put;
1505
1506         nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1507         if (nest == NULL)
1508                 goto out_module_put;
1509
1510         ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
1511         if (ret < 0)
1512                 goto out_module_put;
1513
1514         if (ret > 0) {
1515                 nla_nest_end(skb, nest);
1516                 ret = skb->len;
1517                 act_count = cb->args[1];
1518                 memcpy(nla_data(count_attr), &act_count, sizeof(u32));
1519                 cb->args[1] = 0;
1520         } else
1521                 nlmsg_trim(skb, b);
1522
1523         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1524         if (NETLINK_CB(cb->skb).portid && ret)
1525                 nlh->nlmsg_flags |= NLM_F_MULTI;
1526         module_put(a_o->owner);
1527         return skb->len;
1528
1529 out_module_put:
1530         module_put(a_o->owner);
1531         nlmsg_trim(skb, b);
1532         return skb->len;
1533 }
1534
1535 static int __init tc_action_init(void)
1536 {
1537         rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
1538         rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
1539         rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1540                       0);
1541
1542         return 0;
1543 }
1544
1545 subsys_initcall(tc_action_init);