2 * Copyright Gavin Shan, IBM Corporation 2016.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
17 #include <net/net_namespace.h>
19 #include <net/addrconf.h>
21 #include <net/if_inet6.h>
25 #include "ncsi-netlink.h"
/* Global list of registered NCSI devices and the lock protecting it. */
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
30 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
32 struct ncsi_dev *nd = &ndp->ndev;
33 struct ncsi_package *np;
34 struct ncsi_channel *nc;
37 nd->state = ncsi_dev_state_functional;
44 NCSI_FOR_EACH_PACKAGE(ndp, np) {
45 NCSI_FOR_EACH_CHANNEL(np, nc) {
46 spin_lock_irqsave(&nc->lock, flags);
48 if (!list_empty(&nc->link) ||
49 nc->state != NCSI_CHANNEL_ACTIVE) {
50 spin_unlock_irqrestore(&nc->lock, flags);
54 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
55 spin_unlock_irqrestore(&nc->lock, flags);
60 spin_unlock_irqrestore(&nc->lock, flags);
68 static void ncsi_channel_monitor(struct timer_list *t)
70 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
71 struct ncsi_package *np = nc->package;
72 struct ncsi_dev_priv *ndp = np->ndp;
73 struct ncsi_channel_mode *ncm;
74 struct ncsi_cmd_arg nca;
75 bool enabled, chained;
76 unsigned int monitor_state;
80 spin_lock_irqsave(&nc->lock, flags);
82 chained = !list_empty(&nc->link);
83 enabled = nc->monitor.enabled;
84 monitor_state = nc->monitor.state;
85 spin_unlock_irqrestore(&nc->lock, flags);
87 if (!enabled || chained) {
88 ncsi_stop_channel_monitor(nc);
91 if (state != NCSI_CHANNEL_INACTIVE &&
92 state != NCSI_CHANNEL_ACTIVE) {
93 ncsi_stop_channel_monitor(nc);
97 switch (monitor_state) {
98 case NCSI_CHANNEL_MONITOR_START:
99 case NCSI_CHANNEL_MONITOR_RETRY:
101 nca.package = np->id;
102 nca.channel = nc->id;
103 nca.type = NCSI_PKT_CMD_GLS;
105 ret = ncsi_xmit_cmd(&nca);
107 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
110 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
113 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
115 if (!(ndp->flags & NCSI_DEV_HWA)) {
116 ncsi_report_link(ndp, true);
117 ndp->flags |= NCSI_DEV_RESHUFFLE;
120 ncsi_stop_channel_monitor(nc);
122 ncm = &nc->modes[NCSI_MODE_LINK];
123 spin_lock_irqsave(&nc->lock, flags);
124 nc->state = NCSI_CHANNEL_INVISIBLE;
125 ncm->data[2] &= ~0x1;
126 spin_unlock_irqrestore(&nc->lock, flags);
128 spin_lock_irqsave(&ndp->lock, flags);
129 nc->state = NCSI_CHANNEL_ACTIVE;
130 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
131 spin_unlock_irqrestore(&ndp->lock, flags);
132 ncsi_process_next_channel(ndp);
136 spin_lock_irqsave(&nc->lock, flags);
138 spin_unlock_irqrestore(&nc->lock, flags);
139 mod_timer(&nc->monitor.timer, jiffies + HZ);
142 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
146 spin_lock_irqsave(&nc->lock, flags);
147 WARN_ON_ONCE(nc->monitor.enabled);
148 nc->monitor.enabled = true;
149 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
150 spin_unlock_irqrestore(&nc->lock, flags);
152 mod_timer(&nc->monitor.timer, jiffies + HZ);
155 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
159 spin_lock_irqsave(&nc->lock, flags);
160 if (!nc->monitor.enabled) {
161 spin_unlock_irqrestore(&nc->lock, flags);
164 nc->monitor.enabled = false;
165 spin_unlock_irqrestore(&nc->lock, flags);
167 del_timer_sync(&nc->monitor.timer);
170 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
173 struct ncsi_channel *nc;
175 NCSI_FOR_EACH_CHANNEL(np, nc) {
183 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
185 struct ncsi_channel *nc, *tmp;
189 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
195 nc->state = NCSI_CHANNEL_INACTIVE;
196 nc->monitor.enabled = false;
197 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
198 spin_lock_init(&nc->lock);
199 INIT_LIST_HEAD(&nc->link);
200 for (index = 0; index < NCSI_CAP_MAX; index++)
201 nc->caps[index].index = index;
202 for (index = 0; index < NCSI_MODE_MAX; index++)
203 nc->modes[index].index = index;
205 spin_lock_irqsave(&np->lock, flags);
206 tmp = ncsi_find_channel(np, id);
208 spin_unlock_irqrestore(&np->lock, flags);
213 list_add_tail_rcu(&nc->node, &np->channels);
215 spin_unlock_irqrestore(&np->lock, flags);
220 static void ncsi_remove_channel(struct ncsi_channel *nc)
222 struct ncsi_package *np = nc->package;
225 spin_lock_irqsave(&nc->lock, flags);
227 /* Release filters */
228 kfree(nc->mac_filter.addrs);
229 kfree(nc->vlan_filter.vids);
231 nc->state = NCSI_CHANNEL_INACTIVE;
232 spin_unlock_irqrestore(&nc->lock, flags);
233 ncsi_stop_channel_monitor(nc);
235 /* Remove and free channel */
236 spin_lock_irqsave(&np->lock, flags);
237 list_del_rcu(&nc->node);
239 spin_unlock_irqrestore(&np->lock, flags);
244 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
247 struct ncsi_package *np;
249 NCSI_FOR_EACH_PACKAGE(ndp, np) {
257 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
260 struct ncsi_package *np, *tmp;
263 np = kzalloc(sizeof(*np), GFP_ATOMIC);
269 spin_lock_init(&np->lock);
270 INIT_LIST_HEAD(&np->channels);
272 spin_lock_irqsave(&ndp->lock, flags);
273 tmp = ncsi_find_package(ndp, id);
275 spin_unlock_irqrestore(&ndp->lock, flags);
280 list_add_tail_rcu(&np->node, &ndp->packages);
282 spin_unlock_irqrestore(&ndp->lock, flags);
287 void ncsi_remove_package(struct ncsi_package *np)
289 struct ncsi_dev_priv *ndp = np->ndp;
290 struct ncsi_channel *nc, *tmp;
293 /* Release all child channels */
294 list_for_each_entry_safe(nc, tmp, &np->channels, node)
295 ncsi_remove_channel(nc);
297 /* Remove and free package */
298 spin_lock_irqsave(&ndp->lock, flags);
299 list_del_rcu(&np->node);
301 spin_unlock_irqrestore(&ndp->lock, flags);
306 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
308 struct ncsi_package **np,
309 struct ncsi_channel **nc)
311 struct ncsi_package *p;
312 struct ncsi_channel *c;
314 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
315 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
323 /* For two consecutive NCSI commands, the packet IDs shouldn't
324 * be same. Otherwise, the bogus response might be replied. So
325 * the available IDs are allocated in round-robin fashion.
327 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
328 unsigned int req_flags)
330 struct ncsi_request *nr = NULL;
331 int i, limit = ARRAY_SIZE(ndp->requests);
334 /* Check if there is one available request until the ceiling */
335 spin_lock_irqsave(&ndp->lock, flags);
336 for (i = ndp->request_id; i < limit; i++) {
337 if (ndp->requests[i].used)
340 nr = &ndp->requests[i];
342 nr->flags = req_flags;
343 ndp->request_id = i + 1;
347 /* Fail back to check from the starting cursor */
348 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
349 if (ndp->requests[i].used)
352 nr = &ndp->requests[i];
354 nr->flags = req_flags;
355 ndp->request_id = i + 1;
360 spin_unlock_irqrestore(&ndp->lock, flags);
364 void ncsi_free_request(struct ncsi_request *nr)
366 struct ncsi_dev_priv *ndp = nr->ndp;
367 struct sk_buff *cmd, *rsp;
373 del_timer_sync(&nr->timer);
376 spin_lock_irqsave(&ndp->lock, flags);
382 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
383 spin_unlock_irqrestore(&ndp->lock, flags);
385 if (driven && cmd && --ndp->pending_req_num == 0)
386 schedule_work(&ndp->work);
388 /* Release command and response */
393 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
395 struct ncsi_dev_priv *ndp;
397 NCSI_FOR_EACH_DEV(ndp) {
398 if (ndp->ndev.dev == dev)
405 static void ncsi_request_timeout(struct timer_list *t)
407 struct ncsi_request *nr = from_timer(nr, t, timer);
408 struct ncsi_dev_priv *ndp = nr->ndp;
411 /* If the request already had associated response,
412 * let the response handler to release it.
414 spin_lock_irqsave(&ndp->lock, flags);
416 if (nr->rsp || !nr->cmd) {
417 spin_unlock_irqrestore(&ndp->lock, flags);
420 spin_unlock_irqrestore(&ndp->lock, flags);
422 /* Release the request */
423 ncsi_free_request(nr);
426 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
428 struct ncsi_dev *nd = &ndp->ndev;
429 struct ncsi_package *np = ndp->active_package;
430 struct ncsi_channel *nc = ndp->active_channel;
431 struct ncsi_cmd_arg nca;
436 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
438 case ncsi_dev_state_suspend:
439 nd->state = ncsi_dev_state_suspend_select;
441 case ncsi_dev_state_suspend_select:
442 ndp->pending_req_num = 1;
444 nca.type = NCSI_PKT_CMD_SP;
445 nca.package = np->id;
446 nca.channel = NCSI_RESERVED_CHANNEL;
447 if (ndp->flags & NCSI_DEV_HWA)
452 /* To retrieve the last link states of channels in current
453 * package when current active channel needs fail over to
454 * another one. It means we will possibly select another
455 * channel as next active one. The link states of channels
456 * are most important factor of the selection. So we need
457 * accurate link states. Unfortunately, the link states on
458 * inactive channels can't be updated with LSC AEN in time.
460 if (ndp->flags & NCSI_DEV_RESHUFFLE)
461 nd->state = ncsi_dev_state_suspend_gls;
463 nd->state = ncsi_dev_state_suspend_dcnt;
464 ret = ncsi_xmit_cmd(&nca);
469 case ncsi_dev_state_suspend_gls:
470 ndp->pending_req_num = np->channel_num;
472 nca.type = NCSI_PKT_CMD_GLS;
473 nca.package = np->id;
475 nd->state = ncsi_dev_state_suspend_dcnt;
476 NCSI_FOR_EACH_CHANNEL(np, nc) {
477 nca.channel = nc->id;
478 ret = ncsi_xmit_cmd(&nca);
484 case ncsi_dev_state_suspend_dcnt:
485 ndp->pending_req_num = 1;
487 nca.type = NCSI_PKT_CMD_DCNT;
488 nca.package = np->id;
489 nca.channel = nc->id;
491 nd->state = ncsi_dev_state_suspend_dc;
492 ret = ncsi_xmit_cmd(&nca);
497 case ncsi_dev_state_suspend_dc:
498 ndp->pending_req_num = 1;
500 nca.type = NCSI_PKT_CMD_DC;
501 nca.package = np->id;
502 nca.channel = nc->id;
505 nd->state = ncsi_dev_state_suspend_deselect;
506 ret = ncsi_xmit_cmd(&nca);
511 case ncsi_dev_state_suspend_deselect:
512 ndp->pending_req_num = 1;
514 nca.type = NCSI_PKT_CMD_DP;
515 nca.package = np->id;
516 nca.channel = NCSI_RESERVED_CHANNEL;
518 nd->state = ncsi_dev_state_suspend_done;
519 ret = ncsi_xmit_cmd(&nca);
524 case ncsi_dev_state_suspend_done:
525 spin_lock_irqsave(&nc->lock, flags);
526 nc->state = NCSI_CHANNEL_INACTIVE;
527 spin_unlock_irqrestore(&nc->lock, flags);
528 ncsi_process_next_channel(ndp);
532 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
538 nd->state = ncsi_dev_state_functional;
541 /* Check the VLAN filter bitmap for a set filter, and construct a
542 * "Set VLAN Filter - Disable" packet if found.
544 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
545 struct ncsi_cmd_arg *nca)
547 struct ncsi_channel_vlan_filter *ncf;
553 ncf = &nc->vlan_filter;
554 bitmap = &ncf->bitmap;
556 spin_lock_irqsave(&nc->lock, flags);
557 index = find_next_bit(bitmap, ncf->n_vids, 0);
558 if (index >= ncf->n_vids) {
559 spin_unlock_irqrestore(&nc->lock, flags);
562 vid = ncf->vids[index];
564 clear_bit(index, bitmap);
565 ncf->vids[index] = 0;
566 spin_unlock_irqrestore(&nc->lock, flags);
568 nca->type = NCSI_PKT_CMD_SVF;
570 /* HW filter index starts at 1 */
571 nca->bytes[6] = index + 1;
572 nca->bytes[7] = 0x00;
576 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
579 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
580 struct ncsi_cmd_arg *nca)
582 struct ncsi_channel_vlan_filter *ncf;
583 struct vlan_vid *vlan = NULL;
589 if (list_empty(&ndp->vlan_vids))
592 ncf = &nc->vlan_filter;
593 bitmap = &ncf->bitmap;
595 spin_lock_irqsave(&nc->lock, flags);
598 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
600 for (i = 0; i < ncf->n_vids; i++)
601 if (ncf->vids[i] == vid) {
611 /* No VLAN ID is not set */
612 spin_unlock_irqrestore(&nc->lock, flags);
616 index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
617 if (index < 0 || index >= ncf->n_vids) {
618 netdev_err(ndp->ndev.dev,
619 "Channel %u already has all VLAN filters set\n",
621 spin_unlock_irqrestore(&nc->lock, flags);
625 ncf->vids[index] = vid;
626 set_bit(index, bitmap);
627 spin_unlock_irqrestore(&nc->lock, flags);
629 nca->type = NCSI_PKT_CMD_SVF;
631 /* HW filter index starts at 1 */
632 nca->bytes[6] = index + 1;
633 nca->bytes[7] = 0x01;
638 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
640 struct ncsi_dev *nd = &ndp->ndev;
641 struct net_device *dev = nd->dev;
642 struct ncsi_package *np = ndp->active_package;
643 struct ncsi_channel *nc = ndp->active_channel;
644 struct ncsi_channel *hot_nc = NULL;
645 struct ncsi_cmd_arg nca;
651 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
653 case ncsi_dev_state_config:
654 case ncsi_dev_state_config_sp:
655 ndp->pending_req_num = 1;
657 /* Select the specific package */
658 nca.type = NCSI_PKT_CMD_SP;
659 if (ndp->flags & NCSI_DEV_HWA)
663 nca.package = np->id;
664 nca.channel = NCSI_RESERVED_CHANNEL;
665 ret = ncsi_xmit_cmd(&nca);
667 netdev_err(ndp->ndev.dev,
668 "NCSI: Failed to transmit CMD_SP\n");
672 nd->state = ncsi_dev_state_config_cis;
674 case ncsi_dev_state_config_cis:
675 ndp->pending_req_num = 1;
677 /* Clear initial state */
678 nca.type = NCSI_PKT_CMD_CIS;
679 nca.package = np->id;
680 nca.channel = nc->id;
681 ret = ncsi_xmit_cmd(&nca);
683 netdev_err(ndp->ndev.dev,
684 "NCSI: Failed to transmit CMD_CIS\n");
688 nd->state = ncsi_dev_state_config_clear_vids;
690 case ncsi_dev_state_config_clear_vids:
691 case ncsi_dev_state_config_svf:
692 case ncsi_dev_state_config_ev:
693 case ncsi_dev_state_config_sma:
694 case ncsi_dev_state_config_ebf:
695 #if IS_ENABLED(CONFIG_IPV6)
696 case ncsi_dev_state_config_egmf:
698 case ncsi_dev_state_config_ecnt:
699 case ncsi_dev_state_config_ec:
700 case ncsi_dev_state_config_ae:
701 case ncsi_dev_state_config_gls:
702 ndp->pending_req_num = 1;
704 nca.package = np->id;
705 nca.channel = nc->id;
707 /* Clear any active filters on the channel before setting */
708 if (nd->state == ncsi_dev_state_config_clear_vids) {
709 ret = clear_one_vid(ndp, nc, &nca);
711 nd->state = ncsi_dev_state_config_svf;
712 schedule_work(&ndp->work);
716 nd->state = ncsi_dev_state_config_clear_vids;
717 /* Add known VLAN tags to the filter */
718 } else if (nd->state == ncsi_dev_state_config_svf) {
719 ret = set_one_vid(ndp, nc, &nca);
721 nd->state = ncsi_dev_state_config_ev;
722 schedule_work(&ndp->work);
726 nd->state = ncsi_dev_state_config_svf;
727 /* Enable/Disable the VLAN filter */
728 } else if (nd->state == ncsi_dev_state_config_ev) {
729 if (list_empty(&ndp->vlan_vids)) {
730 nca.type = NCSI_PKT_CMD_DV;
732 nca.type = NCSI_PKT_CMD_EV;
733 nca.bytes[3] = NCSI_CAP_VLAN_NO;
735 nd->state = ncsi_dev_state_config_sma;
736 } else if (nd->state == ncsi_dev_state_config_sma) {
737 /* Use first entry in unicast filter table. Note that
738 * the MAC filter table starts from entry 1 instead of
741 nca.type = NCSI_PKT_CMD_SMA;
742 for (index = 0; index < 6; index++)
743 nca.bytes[index] = dev->dev_addr[index];
746 nd->state = ncsi_dev_state_config_ebf;
747 } else if (nd->state == ncsi_dev_state_config_ebf) {
748 nca.type = NCSI_PKT_CMD_EBF;
749 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
750 nd->state = ncsi_dev_state_config_ecnt;
751 #if IS_ENABLED(CONFIG_IPV6)
752 if (ndp->inet6_addr_num > 0 &&
753 (nc->caps[NCSI_CAP_GENERIC].cap &
754 NCSI_CAP_GENERIC_MC))
755 nd->state = ncsi_dev_state_config_egmf;
757 nd->state = ncsi_dev_state_config_ecnt;
758 } else if (nd->state == ncsi_dev_state_config_egmf) {
759 nca.type = NCSI_PKT_CMD_EGMF;
760 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
761 nd->state = ncsi_dev_state_config_ecnt;
762 #endif /* CONFIG_IPV6 */
763 } else if (nd->state == ncsi_dev_state_config_ecnt) {
764 nca.type = NCSI_PKT_CMD_ECNT;
765 nd->state = ncsi_dev_state_config_ec;
766 } else if (nd->state == ncsi_dev_state_config_ec) {
767 /* Enable AEN if it's supported */
768 nca.type = NCSI_PKT_CMD_EC;
769 nd->state = ncsi_dev_state_config_ae;
770 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
771 nd->state = ncsi_dev_state_config_gls;
772 } else if (nd->state == ncsi_dev_state_config_ae) {
773 nca.type = NCSI_PKT_CMD_AE;
775 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
776 nd->state = ncsi_dev_state_config_gls;
777 } else if (nd->state == ncsi_dev_state_config_gls) {
778 nca.type = NCSI_PKT_CMD_GLS;
779 nd->state = ncsi_dev_state_config_done;
782 ret = ncsi_xmit_cmd(&nca);
784 netdev_err(ndp->ndev.dev,
785 "NCSI: Failed to transmit CMD %x\n",
790 case ncsi_dev_state_config_done:
791 netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
793 spin_lock_irqsave(&nc->lock, flags);
794 if (nc->reconfigure_needed) {
795 /* This channel's configuration has been updated
796 * part-way during the config state - start the
797 * channel configuration over
799 nc->reconfigure_needed = false;
800 nc->state = NCSI_CHANNEL_INACTIVE;
801 spin_unlock_irqrestore(&nc->lock, flags);
803 spin_lock_irqsave(&ndp->lock, flags);
804 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
805 spin_unlock_irqrestore(&ndp->lock, flags);
807 netdev_dbg(dev, "Dirty NCSI channel state reset\n");
808 ncsi_process_next_channel(ndp);
812 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
814 nc->state = NCSI_CHANNEL_ACTIVE;
817 nc->state = NCSI_CHANNEL_INACTIVE;
818 netdev_dbg(ndp->ndev.dev,
819 "NCSI: channel %u link down after config\n",
822 spin_unlock_irqrestore(&nc->lock, flags);
824 /* Update the hot channel */
825 spin_lock_irqsave(&ndp->lock, flags);
826 ndp->hot_channel = hot_nc;
827 spin_unlock_irqrestore(&ndp->lock, flags);
829 ncsi_start_channel_monitor(nc);
830 ncsi_process_next_channel(ndp);
833 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
840 ncsi_report_link(ndp, true);
843 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
845 struct ncsi_package *np, *force_package;
846 struct ncsi_channel *nc, *found, *hot_nc, *force_channel;
847 struct ncsi_channel_mode *ncm;
850 spin_lock_irqsave(&ndp->lock, flags);
851 hot_nc = ndp->hot_channel;
852 force_channel = ndp->force_channel;
853 force_package = ndp->force_package;
854 spin_unlock_irqrestore(&ndp->lock, flags);
856 /* Force a specific channel whether or not it has link if we have been
857 * configured to do so
859 if (force_package && force_channel) {
860 found = force_channel;
861 ncm = &found->modes[NCSI_MODE_LINK];
862 if (!(ncm->data[2] & 0x1))
863 netdev_info(ndp->ndev.dev,
864 "NCSI: Channel %u forced, but it is link down\n",
869 /* The search is done once an inactive channel with up
873 NCSI_FOR_EACH_PACKAGE(ndp, np) {
874 if (ndp->force_package && np != ndp->force_package)
876 NCSI_FOR_EACH_CHANNEL(np, nc) {
877 spin_lock_irqsave(&nc->lock, flags);
879 if (!list_empty(&nc->link) ||
880 nc->state != NCSI_CHANNEL_INACTIVE) {
881 spin_unlock_irqrestore(&nc->lock, flags);
891 ncm = &nc->modes[NCSI_MODE_LINK];
892 if (ncm->data[2] & 0x1) {
893 spin_unlock_irqrestore(&nc->lock, flags);
898 spin_unlock_irqrestore(&nc->lock, flags);
903 netdev_warn(ndp->ndev.dev,
904 "NCSI: No channel found with link\n");
905 ncsi_report_link(ndp, true);
909 ncm = &found->modes[NCSI_MODE_LINK];
910 netdev_dbg(ndp->ndev.dev,
911 "NCSI: Channel %u added to queue (link %s)\n",
912 found->id, ncm->data[2] & 0x1 ? "up" : "down");
915 spin_lock_irqsave(&ndp->lock, flags);
916 list_add_tail_rcu(&found->link, &ndp->channel_queue);
917 spin_unlock_irqrestore(&ndp->lock, flags);
919 return ncsi_process_next_channel(ndp);
922 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
924 struct ncsi_package *np;
925 struct ncsi_channel *nc;
927 bool has_channel = false;
929 /* The hardware arbitration is disabled if any one channel
930 * doesn't support explicitly.
932 NCSI_FOR_EACH_PACKAGE(ndp, np) {
933 NCSI_FOR_EACH_CHANNEL(np, nc) {
936 cap = nc->caps[NCSI_CAP_GENERIC].cap;
937 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
938 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
939 NCSI_CAP_GENERIC_HWA_SUPPORT) {
940 ndp->flags &= ~NCSI_DEV_HWA;
947 ndp->flags |= NCSI_DEV_HWA;
951 ndp->flags &= ~NCSI_DEV_HWA;
955 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
957 struct ncsi_package *np;
958 struct ncsi_channel *nc;
961 /* Move all available channels to processing queue */
962 spin_lock_irqsave(&ndp->lock, flags);
963 NCSI_FOR_EACH_PACKAGE(ndp, np) {
964 NCSI_FOR_EACH_CHANNEL(np, nc) {
965 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
966 !list_empty(&nc->link));
967 ncsi_stop_channel_monitor(nc);
968 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
971 spin_unlock_irqrestore(&ndp->lock, flags);
973 /* We can have no channels in extremely case */
974 if (list_empty(&ndp->channel_queue)) {
975 netdev_err(ndp->ndev.dev,
976 "NCSI: No available channels for HWA\n");
977 ncsi_report_link(ndp, false);
981 return ncsi_process_next_channel(ndp);
984 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
986 struct ncsi_dev *nd = &ndp->ndev;
987 struct ncsi_package *np;
988 struct ncsi_channel *nc;
989 struct ncsi_cmd_arg nca;
994 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
996 case ncsi_dev_state_probe:
997 nd->state = ncsi_dev_state_probe_deselect;
999 case ncsi_dev_state_probe_deselect:
1000 ndp->pending_req_num = 8;
1002 /* Deselect all possible packages */
1003 nca.type = NCSI_PKT_CMD_DP;
1004 nca.channel = NCSI_RESERVED_CHANNEL;
1005 for (index = 0; index < 8; index++) {
1006 nca.package = index;
1007 ret = ncsi_xmit_cmd(&nca);
1012 nd->state = ncsi_dev_state_probe_package;
1014 case ncsi_dev_state_probe_package:
1015 ndp->pending_req_num = 16;
1017 /* Select all possible packages */
1018 nca.type = NCSI_PKT_CMD_SP;
1020 nca.channel = NCSI_RESERVED_CHANNEL;
1021 for (index = 0; index < 8; index++) {
1022 nca.package = index;
1023 ret = ncsi_xmit_cmd(&nca);
1028 /* Disable all possible packages */
1029 nca.type = NCSI_PKT_CMD_DP;
1030 for (index = 0; index < 8; index++) {
1031 nca.package = index;
1032 ret = ncsi_xmit_cmd(&nca);
1037 nd->state = ncsi_dev_state_probe_channel;
1039 case ncsi_dev_state_probe_channel:
1040 if (!ndp->active_package)
1041 ndp->active_package = list_first_or_null_rcu(
1042 &ndp->packages, struct ncsi_package, node);
1043 else if (list_is_last(&ndp->active_package->node,
1045 ndp->active_package = NULL;
1047 ndp->active_package = list_next_entry(
1048 ndp->active_package, node);
1050 /* All available packages and channels are enumerated. The
1051 * enumeration happens for once when the NCSI interface is
1052 * started. So we need continue to start the interface after
1055 * We have to choose an active channel before configuring it.
1056 * Note that we possibly don't have active channel in extreme
1059 if (!ndp->active_package) {
1060 ndp->flags |= NCSI_DEV_PROBED;
1061 if (ncsi_check_hwa(ndp))
1062 ncsi_enable_hwa(ndp);
1064 ncsi_choose_active_channel(ndp);
1068 /* Select the active package */
1069 ndp->pending_req_num = 1;
1070 nca.type = NCSI_PKT_CMD_SP;
1072 nca.package = ndp->active_package->id;
1073 nca.channel = NCSI_RESERVED_CHANNEL;
1074 ret = ncsi_xmit_cmd(&nca);
1078 nd->state = ncsi_dev_state_probe_cis;
1080 case ncsi_dev_state_probe_cis:
1081 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1083 /* Clear initial state */
1084 nca.type = NCSI_PKT_CMD_CIS;
1085 nca.package = ndp->active_package->id;
1086 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1087 nca.channel = index;
1088 ret = ncsi_xmit_cmd(&nca);
1093 nd->state = ncsi_dev_state_probe_gvi;
1095 case ncsi_dev_state_probe_gvi:
1096 case ncsi_dev_state_probe_gc:
1097 case ncsi_dev_state_probe_gls:
1098 np = ndp->active_package;
1099 ndp->pending_req_num = np->channel_num;
1101 /* Retrieve version, capability or link status */
1102 if (nd->state == ncsi_dev_state_probe_gvi)
1103 nca.type = NCSI_PKT_CMD_GVI;
1104 else if (nd->state == ncsi_dev_state_probe_gc)
1105 nca.type = NCSI_PKT_CMD_GC;
1107 nca.type = NCSI_PKT_CMD_GLS;
1109 nca.package = np->id;
1110 NCSI_FOR_EACH_CHANNEL(np, nc) {
1111 nca.channel = nc->id;
1112 ret = ncsi_xmit_cmd(&nca);
1117 if (nd->state == ncsi_dev_state_probe_gvi)
1118 nd->state = ncsi_dev_state_probe_gc;
1119 else if (nd->state == ncsi_dev_state_probe_gc)
1120 nd->state = ncsi_dev_state_probe_gls;
1122 nd->state = ncsi_dev_state_probe_dp;
1124 case ncsi_dev_state_probe_dp:
1125 ndp->pending_req_num = 1;
1127 /* Deselect the active package */
1128 nca.type = NCSI_PKT_CMD_DP;
1129 nca.package = ndp->active_package->id;
1130 nca.channel = NCSI_RESERVED_CHANNEL;
1131 ret = ncsi_xmit_cmd(&nca);
1135 /* Scan channels in next package */
1136 nd->state = ncsi_dev_state_probe_channel;
1139 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1145 netdev_err(ndp->ndev.dev,
1146 "NCSI: Failed to transmit cmd 0x%x during probe\n",
1148 ncsi_report_link(ndp, true);
1151 static void ncsi_dev_work(struct work_struct *work)
1153 struct ncsi_dev_priv *ndp = container_of(work,
1154 struct ncsi_dev_priv, work);
1155 struct ncsi_dev *nd = &ndp->ndev;
1157 switch (nd->state & ncsi_dev_state_major) {
1158 case ncsi_dev_state_probe:
1159 ncsi_probe_channel(ndp);
1161 case ncsi_dev_state_suspend:
1162 ncsi_suspend_channel(ndp);
1164 case ncsi_dev_state_config:
1165 ncsi_configure_channel(ndp);
1168 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1173 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1175 struct ncsi_channel *nc;
1177 unsigned long flags;
1179 spin_lock_irqsave(&ndp->lock, flags);
1180 nc = list_first_or_null_rcu(&ndp->channel_queue,
1181 struct ncsi_channel, link);
1183 spin_unlock_irqrestore(&ndp->lock, flags);
1187 list_del_init(&nc->link);
1188 spin_unlock_irqrestore(&ndp->lock, flags);
1190 spin_lock_irqsave(&nc->lock, flags);
1191 old_state = nc->state;
1192 nc->state = NCSI_CHANNEL_INVISIBLE;
1193 spin_unlock_irqrestore(&nc->lock, flags);
1195 ndp->active_channel = nc;
1196 ndp->active_package = nc->package;
1198 switch (old_state) {
1199 case NCSI_CHANNEL_INACTIVE:
1200 ndp->ndev.state = ncsi_dev_state_config;
1201 netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1203 ncsi_configure_channel(ndp);
1205 case NCSI_CHANNEL_ACTIVE:
1206 ndp->ndev.state = ncsi_dev_state_suspend;
1207 netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1209 ncsi_suspend_channel(ndp);
1212 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1213 old_state, nc->package->id, nc->id);
1214 ncsi_report_link(ndp, false);
1221 ndp->active_channel = NULL;
1222 ndp->active_package = NULL;
1223 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1224 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1225 return ncsi_choose_active_channel(ndp);
1228 ncsi_report_link(ndp, false);
1232 #if IS_ENABLED(CONFIG_IPV6)
1233 static int ncsi_inet6addr_event(struct notifier_block *this,
1234 unsigned long event, void *data)
1236 struct inet6_ifaddr *ifa = data;
1237 struct net_device *dev = ifa->idev->dev;
1238 struct ncsi_dev *nd = ncsi_find_dev(dev);
1239 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1240 struct ncsi_package *np;
1241 struct ncsi_channel *nc;
1242 struct ncsi_cmd_arg nca;
1246 if (!ndp || (ipv6_addr_type(&ifa->addr) &
1247 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1252 action = (++ndp->inet6_addr_num) == 1;
1253 nca.type = NCSI_PKT_CMD_EGMF;
1256 action = (--ndp->inet6_addr_num == 0);
1257 nca.type = NCSI_PKT_CMD_DGMF;
1263 /* We might not have active channel or packages. The IPv6
1264 * required multicast will be enabled when active channel
1265 * or packages are chosen.
1267 np = ndp->active_package;
1268 nc = ndp->active_channel;
1269 if (!action || !np || !nc)
1272 /* We needn't enable or disable it if the function isn't supported */
1273 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1278 nca.package = np->id;
1279 nca.channel = nc->id;
1280 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1281 ret = ncsi_xmit_cmd(&nca);
1283 netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1284 (event == NETDEV_UP) ? "enable" : "disable", ret);
1291 static struct notifier_block ncsi_inet6addr_notifier = {
1292 .notifier_call = ncsi_inet6addr_event,
1294 #endif /* CONFIG_IPV6 */
1296 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1298 struct ncsi_dev *nd = &ndp->ndev;
1299 struct ncsi_channel *nc;
1300 struct ncsi_package *np;
1301 unsigned long flags;
1304 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1305 NCSI_FOR_EACH_CHANNEL(np, nc) {
1306 spin_lock_irqsave(&nc->lock, flags);
1308 /* Channels may be busy, mark dirty instead of
1310 * a) not ACTIVE (configured)
1311 * b) in the channel_queue (to be configured)
1312 * c) it's ndev is in the config state
1314 if (nc->state != NCSI_CHANNEL_ACTIVE) {
1315 if ((ndp->ndev.state & 0xff00) ==
1316 ncsi_dev_state_config ||
1317 !list_empty(&nc->link)) {
1319 "NCSI: channel %p marked dirty\n",
1321 nc->reconfigure_needed = true;
1323 spin_unlock_irqrestore(&nc->lock, flags);
1327 spin_unlock_irqrestore(&nc->lock, flags);
1329 ncsi_stop_channel_monitor(nc);
1330 spin_lock_irqsave(&nc->lock, flags);
1331 nc->state = NCSI_CHANNEL_INACTIVE;
1332 spin_unlock_irqrestore(&nc->lock, flags);
1334 spin_lock_irqsave(&ndp->lock, flags);
1335 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1336 spin_unlock_irqrestore(&ndp->lock, flags);
1338 netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1346 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1348 struct ncsi_dev_priv *ndp;
1349 unsigned int n_vids = 0;
1350 struct vlan_vid *vlan;
1351 struct ncsi_dev *nd;
1357 nd = ncsi_find_dev(dev);
1359 netdev_warn(dev, "NCSI: No net_device?\n");
1363 ndp = TO_NCSI_DEV_PRIV(nd);
1365 /* Add the VLAN id to our internal list */
1366 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1368 if (vlan->vid == vid) {
1369 netdev_dbg(dev, "NCSI: vid %u already registered\n",
1374 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1376 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1377 vid, NCSI_MAX_VLAN_VIDS);
1381 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1385 vlan->proto = proto;
1387 list_add_rcu(&vlan->list, &ndp->vlan_vids);
1389 netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1391 found = ncsi_kick_channels(ndp) != 0;
1393 return found ? ncsi_process_next_channel(ndp) : 0;
1395 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1397 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1399 struct vlan_vid *vlan, *tmp;
1400 struct ncsi_dev_priv *ndp;
1401 struct ncsi_dev *nd;
1407 nd = ncsi_find_dev(dev);
1409 netdev_warn(dev, "NCSI: no net_device?\n");
1413 ndp = TO_NCSI_DEV_PRIV(nd);
1415 /* Remove the VLAN id from our internal list */
1416 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1417 if (vlan->vid == vid) {
1418 netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1419 list_del_rcu(&vlan->list);
1425 netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1429 found = ncsi_kick_channels(ndp) != 0;
1431 return found ? ncsi_process_next_channel(ndp) : 0;
1433 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1435 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1436 void (*handler)(struct ncsi_dev *ndev))
1438 struct ncsi_dev_priv *ndp;
1439 struct ncsi_dev *nd;
1440 unsigned long flags;
1443 /* Check if the device has been registered or not */
1444 nd = ncsi_find_dev(dev);
1448 /* Create NCSI device */
1449 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1454 nd->state = ncsi_dev_state_registered;
1456 nd->handler = handler;
1457 ndp->pending_req_num = 0;
1458 INIT_LIST_HEAD(&ndp->channel_queue);
1459 INIT_LIST_HEAD(&ndp->vlan_vids);
1460 INIT_WORK(&ndp->work, ncsi_dev_work);
1462 /* Initialize private NCSI device */
1463 spin_lock_init(&ndp->lock);
1464 INIT_LIST_HEAD(&ndp->packages);
1465 ndp->request_id = NCSI_REQ_START_IDX;
1466 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1467 ndp->requests[i].id = i;
1468 ndp->requests[i].ndp = ndp;
1469 timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1472 spin_lock_irqsave(&ncsi_dev_lock, flags);
1473 #if IS_ENABLED(CONFIG_IPV6)
1474 ndp->inet6_addr_num = 0;
1475 if (list_empty(&ncsi_dev_list))
1476 register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1478 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1479 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1481 /* Register NCSI packet Rx handler */
1482 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1483 ndp->ptype.func = ncsi_rcv_rsp;
1484 ndp->ptype.dev = dev;
1485 dev_add_pack(&ndp->ptype);
1487 /* Set up generic netlink interface */
1488 ncsi_init_netlink(dev);
1492 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1494 int ncsi_start_dev(struct ncsi_dev *nd)
1496 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1499 if (nd->state != ncsi_dev_state_registered &&
1500 nd->state != ncsi_dev_state_functional)
1503 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1504 nd->state = ncsi_dev_state_probe;
1505 schedule_work(&ndp->work);
1509 if (ndp->flags & NCSI_DEV_HWA) {
1510 netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
1511 ret = ncsi_enable_hwa(ndp);
1513 ret = ncsi_choose_active_channel(ndp);
1518 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1520 void ncsi_stop_dev(struct ncsi_dev *nd)
1522 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1523 struct ncsi_package *np;
1524 struct ncsi_channel *nc;
1527 unsigned long flags;
1529 /* Stop the channel monitor and reset channel's state */
1530 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1531 NCSI_FOR_EACH_CHANNEL(np, nc) {
1532 ncsi_stop_channel_monitor(nc);
1534 spin_lock_irqsave(&nc->lock, flags);
1535 chained = !list_empty(&nc->link);
1536 old_state = nc->state;
1537 nc->state = NCSI_CHANNEL_INACTIVE;
1538 spin_unlock_irqrestore(&nc->lock, flags);
1540 WARN_ON_ONCE(chained ||
1541 old_state == NCSI_CHANNEL_INVISIBLE);
1545 netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1546 ncsi_report_link(ndp, true);
1548 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1550 void ncsi_unregister_dev(struct ncsi_dev *nd)
1552 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1553 struct ncsi_package *np, *tmp;
1554 unsigned long flags;
1556 dev_remove_pack(&ndp->ptype);
1558 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1559 ncsi_remove_package(np);
1561 spin_lock_irqsave(&ncsi_dev_lock, flags);
1562 list_del_rcu(&ndp->node);
1563 #if IS_ENABLED(CONFIG_IPV6)
1564 if (list_empty(&ncsi_dev_list))
1565 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1567 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1569 ncsi_unregister_netlink(nd->dev);
1573 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);