/*
 * Internal Thunderbolt Connection Manager. This is the firmware running
 * on the Thunderbolt host controller that performs most of the
 * low-level handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD                    0x30
#define PCIE2CIO_CMD_TIMEOUT            BIT(31)
#define PCIE2CIO_CMD_START              BIT(30)
#define PCIE2CIO_CMD_WRITE              BIT(21)
#define PCIE2CIO_CMD_CS_MASK            GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT           19
#define PCIE2CIO_CMD_PORT_MASK          GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT         13

#define PCIE2CIO_WRDATA                 0x34
#define PCIE2CIO_RDDATA                 0x38

#define PHY_PORT_CS1                    0x37
#define PHY_PORT_CS1_LINK_DISABLE       BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT                     5000 /* ms */
#define ICM_MAX_LINK                    4
#define ICM_MAX_DEPTH                   6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *                 controller is connected to. This is only set for
 *                 systems where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *           (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
        struct mutex request_lock;
        struct delayed_work rescan_work;
        struct pci_dev *upstream_port;
        int vnd_cap;
        bool safe_mode;
        bool (*is_supported)(struct tb *tb);
        int (*get_mode)(struct tb *tb);
        int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
        void (*device_connected)(struct tb *tb,
                                 const struct icm_pkg_header *hdr);
        void (*device_disconnected)(struct tb *tb,
                                    const struct icm_pkg_header *hdr);
        void (*xdomain_connected)(struct tb *tb,
                                  const struct icm_pkg_header *hdr);
        void (*xdomain_disconnected)(struct tb *tb,
                                     const struct icm_pkg_header *hdr);
};

struct icm_notification {
        struct work_struct work;
        struct icm_pkg_header *pkg;
        struct tb *tb;
};

static inline struct tb *icm_to_tb(struct icm *icm)
{
        return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
        u8 link;

        link = depth ? route >> ((depth - 1) * 8) : route;
        return tb_phy_port_from_link(link);
}

static inline u8 dual_link_from_link(u8 link)
{
        return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
        return (u64)route_hi << 32 | route_lo;
}
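
/*
 * A short worked example of the helpers above (hypothetical values):
 * the route string packs one 8-bit link number per hop, lowest byte
 * first, so for route 0x0301 the hop at depth 1 uses link 1 and the
 * hop at depth 2 uses link 3, and phy_port_from_route(0x0301, 2) ==
 * tb_phy_port_from_link(3). Dual links come in pairs (1,2), (3,4), ...
 * sharing one physical port, so dual_link_from_link(1) == 2,
 * dual_link_from_link(4) == 3 and dual_link_from_link(0) == 0.
 */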

static bool icm_match(const struct tb_cfg_request *req,
                      const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *res_hdr = pkg->buffer;
        const struct icm_pkg_header *req_hdr = req->request;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (res_hdr->code != req_hdr->code)
                return false;

        return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *hdr = pkg->buffer;

        if (hdr->packet_id < req->npackets) {
                size_t offset = hdr->packet_id * req->response_size;

                memcpy(req->response + offset, pkg->buffer, req->response_size);
        }

        return hdr->packet_id == hdr->total_packets - 1;
}
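
/*
 * Multi-packet responses are reassembled by icm_copy(): packet n lands
 * at offset n * response_size in the response buffer and the request
 * completes once packet total_packets - 1 has been seen. In this file
 * only ICM_GET_TOPOLOGY uses this, with ICM_GET_TOPOLOGY_PACKETS
 * packets in the reply (see icm_fr_get_route() below).
 */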

static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
                       unsigned int timeout_msec)
{
        struct icm *icm = tb_priv(tb);
        int retries = 3;

        do {
                struct tb_cfg_request *req;
                struct tb_cfg_result res;

                req = tb_cfg_request_alloc();
                if (!req)
                        return -ENOMEM;

                req->match = icm_match;
                req->copy = icm_copy;
                req->request = request;
                req->request_size = request_size;
                req->request_type = TB_CFG_PKG_ICM_CMD;
                req->response = response;
                req->npackets = npackets;
                req->response_size = response_size;
                req->response_type = TB_CFG_PKG_ICM_RESP;

                mutex_lock(&icm->request_lock);
                res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
                mutex_unlock(&icm->request_lock);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        return res.err == 1 ? -EIO : res.err;

                usleep_range(20, 50);
        } while (retries--);

        return -ETIMEDOUT;
}
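
/*
 * A minimal caller sketch of the request/response pattern icm_request()
 * expects, mirroring __icm_driver_ready() later in this file:
 *
 *      struct icm_pkg_driver_ready_response reply;
 *      struct icm_pkg_driver_ready request = {
 *              .hdr.code = ICM_DRIVER_READY,
 *      };
 *      int ret;
 *
 *      memset(&reply, 0, sizeof(reply));
 *      ret = icm_request(tb, &request, sizeof(request), &reply,
 *                        sizeof(reply), 1, ICM_TIMEOUT);
 *      if (ret)
 *              return ret;
 */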

static bool icm_fr_is_supported(struct tb *tb)
{
        return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
        int index;

        if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
                return 0;

        index = port >> ICM_PORT_INDEX_SHIFT;
        return index != 0xff ? index : 0;
}
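
/*
 * Example of the decoding above (hypothetical port dword): for an entry
 * whose type field is TB_TYPE_PORT and whose index byte is 0x03,
 * icm_fr_get_switch_index() returns 3. An index byte of 0xff and
 * non-port adapter types both yield 0, which the caller below treats
 * as "no switch behind this port".
 */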

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_fr_pkg_get_topology_response *switches, *sw;
        struct icm_fr_pkg_get_topology request = {
                .hdr = { .code = ICM_GET_TOPOLOGY },
        };
        size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
        int ret, index;
        u8 i;

        switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
        if (!switches)
                return -ENOMEM;

        ret = icm_request(tb, &request, sizeof(request), switches,
                          sizeof(*switches), npackets, ICM_TIMEOUT);
        if (ret)
                goto err_free;

        sw = &switches[0];
        index = icm_fr_get_switch_index(sw->ports[link]);
        if (!index) {
                ret = -ENODEV;
                goto err_free;
        }

        sw = &switches[index];
        for (i = 1; i < depth; i++) {
                unsigned int j;

                if (!(sw->first_data & ICM_SWITCH_USED)) {
                        ret = -ENODEV;
                        goto err_free;
                }

                for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
                        index = icm_fr_get_switch_index(sw->ports[j]);
                        if (index > sw->switch_index) {
                                sw = &switches[index];
                                break;
                        }
                }
        }

        *route = get_route(sw->route_hi, sw->route_lo);

err_free:
        kfree(switches);
        return ret;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_approve_device request;
        struct icm_fr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;

        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, 10000);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_add_device_key request;
        struct icm_fr_pkg_add_device_key_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_fr_pkg_challenge_device request;
        struct icm_fr_pkg_challenge_device_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_fr_pkg_approve_xdomain_response reply;
        struct icm_fr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}
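
/*
 * Note on the link_info encoding used above and in icm_ar_get_route()
 * below: the depth occupies the bits at ICM_LINK_INFO_DEPTH_SHIFT and
 * the link number the low bits, so (hypothetical values) depth 2 and
 * link 3 pack as 2 << ICM_LINK_INFO_DEPTH_SHIFT | 3. Incoming events
 * are unpacked with the matching ICM_LINK_INFO_*_MASK values.
 */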

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        u8 phy_port;
        u8 cmd;

        phy_port = tb_phy_port_from_link(xd->link);
        if (phy_port == 0)
                cmd = NHI_MAILBOX_DISCONNECT_PA;
        else
                cmd = NHI_MAILBOX_DISCONNECT_PB;

        nhi_mailbox_cmd(tb->nhi, cmd, 1);
        usleep_range(10, 50);
        nhi_mailbox_cmd(tb->nhi, cmd, 2);
        return 0;
}

static void remove_switch(struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        parent_sw = tb_to_switch(sw->dev.parent);
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        tb_switch_remove(sw);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_connected *pkg =
                (const struct icm_fr_event_device_connected *)hdr;
        struct tb_switch *sw, *parent_sw;
        struct icm *icm = tb_priv(tb);
        bool authorized = false;
        u8 link, depth;
        u64 route;
        int ret;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;

        ret = icm->get_route(tb, link, depth, &route);
        if (ret) {
                tb_err(tb, "failed to find route string for switch at %u.%u\n",
                       link, depth);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                u8 phy_port, sw_phy_port;

                parent_sw = tb_to_switch(sw->dev.parent);
                sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
                phy_port = phy_port_from_route(route, depth);

                /*
                 * On resume ICM will send us connected events for the
                 * devices that still are present. However, that
                 * information might have changed, for example because a
                 * switch on a dual-link connection might now have been
                 * enumerated using the other link. Make sure our
                 * bookkeeping matches that.
                 */
                if (sw->depth == depth && sw_phy_port == phy_port &&
                    !!sw->authorized == authorized) {
                        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                        tb_port_at(route, parent_sw)->remote =
                                   tb_upstream_port(sw);
                        sw->config.route_hi = upper_32_bits(route);
                        sw->config.route_lo = lower_32_bits(route);
                        sw->connection_id = pkg->connection_id;
                        sw->connection_key = pkg->connection_key;
                        sw->link = link;
                        sw->depth = depth;
                        sw->is_unplugged = false;
                        tb_switch_put(sw);
                        return;
                }

                /*
                 * User connected the same switch to another physical
                 * port or to another part of the topology. Remove the
                 * existing switch now before adding the new one.
                 */
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /*
         * If the switch was not found by UUID, look for a switch on the
         * same physical port (taking possible link aggregation into
         * account) and depth. If we find one it is definitely a stale
         * one so remove it first.
         */
        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
        }
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %u.%u\n",
                       link, depth);
                return;
        }

        sw = tb_switch_alloc(tb, &parent_sw->dev, route);
        if (!sw) {
                tb_switch_put(parent_sw);
                return;
        }

        sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
        sw->connection_id = pkg->connection_id;
        sw->connection_key = pkg->connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->authorized = authorized;
        sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                                ICM_FLAGS_SLEVEL_SHIFT;

        /* Link the two switches now */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

        ret = tb_switch_add(sw);
        if (ret) {
                tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                tb_switch_put(sw);
        }
        tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_disconnected *pkg =
                (const struct icm_fr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u8 link, depth;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void remove_xdomain(struct tb_xdomain *xd)
{
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        tb_port_at(xd->route, sw)->xdomain = NULL;
        tb_xdomain_remove(xd);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_connected *pkg =
                (const struct icm_fr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u8 link, depth;
        bool approved;
        u64 route;

        /*
         * After an NVM upgrade, adding the root switch device fails
         * because we initiated a reset. During that time ICM might
         * still send XDomain connected messages, which we ignore here.
         */
        if (!tb->root_switch)
                return;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

        if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                u8 xd_phy_port, phy_port;

                xd_phy_port = phy_port_from_route(xd->route, xd->depth);
                phy_port = phy_port_from_route(route, depth);

                if (xd->depth == depth && xd_phy_port == phy_port) {
                        xd->link = link;
                        xd->route = route;
                        xd->is_unplugged = false;
                        tb_xdomain_put(xd);
                        return;
                }

                /*
                 * If we find an existing XDomain connection remove it
                 * now. We need to go through login handshake and
                 * everything anyway to be able to re-establish the
                 * connection.
                 */
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * Look if there already exists an XDomain in the same place as
         * the new one and in that case remove it because it is most
         * likely another host that got disconnected.
         */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (!xd) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        xd = tb_xdomain_find_by_link_depth(tb, dual_link,
                                                           depth);
        }
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = get_switch_at_route(tb->root_switch, route);
        if (sw)
                remove_switch(sw);

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        xd = tb_xdomain_alloc(sw->tb, &sw->dev, route,
                              &pkg->local_uuid, &pkg->remote_uuid);
        if (!xd) {
                tb_switch_put(sw);
                return;
        }

        xd->link = link;
        xd->depth = depth;

        tb_port_at(route, sw)->xdomain = xd;

        tb_xdomain_add(xd);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_disconnected *pkg =
                (const struct icm_fr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;

        /*
         * If the connection is through one or multiple devices, the
         * XDomain device is removed along with them, so it is fine if
         * we cannot find it here.
         */
        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
        struct pci_dev *parent;

        parent = pci_upstream_bridge(pdev);
        while (parent) {
                if (!pci_is_pcie(parent))
                        return NULL;
                if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                parent = pci_upstream_bridge(parent);
        }

        if (!parent)
                return NULL;

        switch (parent->device) {
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
                return parent;
        }

        return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
        struct pci_dev *upstream_port;
        struct icm *icm = tb_priv(tb);

        /*
         * Starting from Alpine Ridge we can use ICM on Apple machines
         * as well. We just need to reset and re-enable it first.
         */
        if (!x86_apple_machine)
                return true;

        /*
         * Find the upstream PCIe port in case we need to do a reset
         * through its vendor specific registers.
         */
        upstream_port = get_upstream_port(tb->nhi->pdev);
        if (upstream_port) {
                int cap;

                cap = pci_find_ext_capability(upstream_port,
                                              PCI_EXT_CAP_ID_VNDR);
                if (cap > 0) {
                        icm->upstream_port = upstream_port;
                        icm->vnd_cap = cap;

                        return true;
                }
        }

        return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
        struct tb_nhi *nhi = tb->nhi;
        int retries = 5;
        u32 val;

        do {
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        break;
                msleep(30);
        } while (--retries);

        if (!retries) {
                dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
                return -ENODEV;
        }

        return nhi_mailbox_mode(nhi);
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_ar_pkg_get_route_response reply;
        struct icm_ar_pkg_get_route request = {
                .hdr = { .code = ICM_GET_ROUTE },
                .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        *route = get_route(reply.route_hi, reply.route_lo);
        return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
        struct icm_notification *n = container_of(work, typeof(*n), work);
        struct tb *tb = n->tb;
        struct icm *icm = tb_priv(tb);

        mutex_lock(&tb->lock);

        switch (n->pkg->code) {
        case ICM_EVENT_DEVICE_CONNECTED:
                icm->device_connected(tb, n->pkg);
                break;
        case ICM_EVENT_DEVICE_DISCONNECTED:
                icm->device_disconnected(tb, n->pkg);
                break;
        case ICM_EVENT_XDOMAIN_CONNECTED:
                icm->xdomain_connected(tb, n->pkg);
                break;
        case ICM_EVENT_XDOMAIN_DISCONNECTED:
                icm->xdomain_disconnected(tb, n->pkg);
                break;
        }

        mutex_unlock(&tb->lock);

        kfree(n->pkg);
        kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                             const void *buf, size_t size)
{
        struct icm_notification *n;

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return;

        INIT_WORK(&n->work, icm_handle_notification);
        n->pkg = kmemdup(buf, size, GFP_KERNEL);
        /* Bail out if we could not duplicate the event packet */
        if (!n->pkg) {
                kfree(n);
                return;
        }
        n->tb = tb;

        queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
{
        struct icm_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        unsigned int retries = 10;
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.security_level & 0xf;

        /*
         * Hold on here until the switch config space is accessible so
         * that we can read root switch config successfully.
         */
        do {
                struct tb_cfg_result res;
                u32 tmp;

                res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
                                      0, 1, 100);
                if (!res.err)
                        return 0;

                msleep(50);
        } while (--retries);

        return -ETIMEDOUT;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
        u32 cmd;

        do {
                pci_read_config_dword(icm->upstream_port,
                                      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
                if (!(cmd & PCIE2CIO_CMD_START)) {
                        if (cmd & PCIE2CIO_CMD_TIMEOUT)
                                break;
                        return 0;
                }

                msleep(50);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
                         unsigned int port, unsigned int index, u32 *data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int ret, vnd_cap = icm->vnd_cap;
        u32 cmd;

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        ret = pci2cio_wait_completion(icm, 5000);
        if (ret)
                return ret;

        pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
        return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
                          unsigned int port, unsigned int index, u32 data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int vnd_cap = icm->vnd_cap;
        u32 cmd;

        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        return pci2cio_wait_completion(icm, 5000);
}
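
/*
 * For reference, the PCIE2CIO_CMD word built by the two helpers above,
 * as implied by the masks at the top of this file:
 *
 *      bit  31         TIMEOUT, checked by pci2cio_wait_completion()
 *      bit  30         START, cleared when the transaction completes
 *      bit  21         WRITE (otherwise the transaction is a read)
 *      bits 20:19      config space (enum tb_cfg_space)
 *      bits 18:13      CIO port number
 *      bits 12:0       dword index within the config space
 */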

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
        struct icm *icm = tb_priv(tb);
        u32 val;

        /* Make the ARC wait for the CIO reset event to happen */
        val = ioread32(nhi->iobase + REG_FW_STS);
        val |= REG_FW_STS_CIO_RESET_REQ;
        iowrite32(val, nhi->iobase + REG_FW_STS);

        /* Re-start ARC */
        val = ioread32(nhi->iobase + REG_FW_STS);
        val |= REG_FW_STS_ICM_EN_INVERT;
        val |= REG_FW_STS_ICM_EN_CPU;
        iowrite32(val, nhi->iobase + REG_FW_STS);

        /* Trigger CIO reset now */
        return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
        unsigned int retries = 10;
        int ret;
        u32 val;

        /* Check if the ICM firmware is already running */
        val = ioread32(nhi->iobase + REG_FW_STS);
        if (val & REG_FW_STS_ICM_EN)
                return 0;

        dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

        ret = icm_firmware_reset(tb, nhi);
        if (ret)
                return ret;

        /* Wait until the ICM firmware tells us it is up and running */
        do {
                /* Check that the ICM firmware is running */
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        return 0;

                msleep(300);
        } while (--retries);

        return -ETIMEDOUT;
}

static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
        struct icm *icm = tb_priv(tb);
        u32 state0, state1;
        int port0, port1;
        u32 val0, val1;
        int ret;

        if (!icm->upstream_port)
                return 0;

        if (phy_port) {
                port0 = 3;
                port1 = 4;
        } else {
                port0 = 1;
                port1 = 2;
        }

        /*
         * Read link status of both null ports belonging to a single
         * physical port.
         */
        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
        state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
        state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
        state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

        /* If they are both up we need to reset them now */
        if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
                return 0;

        val0 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
        if (ret)
                return ret;

        /* Wait a bit and then re-enable both ports */
        usleep_range(10, 100);

        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
        return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        struct tb_nhi *nhi = tb->nhi;
        int ret;

        ret = icm_firmware_start(tb, nhi);
        if (ret) {
                dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
                return ret;
        }

        if (icm->get_mode) {
                ret = icm->get_mode(tb);

                switch (ret) {
                case NHI_FW_SAFE_MODE:
                        icm->safe_mode = true;
                        break;

                case NHI_FW_CM_MODE:
                        /* Ask ICM to accept all Thunderbolt devices */
                        nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
                        break;

                default:
                        tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
                        return -ENODEV;
                }
        }

        /*
         * Reset both physical ports if there is anything connected to
         * them already.
         */
        ret = icm_reset_phy_port(tb, 0);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
        ret = icm_reset_phy_port(tb, 1);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

        return 0;
}

static int icm_driver_ready(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        ret = icm_firmware_init(tb);
        if (ret)
                return ret;

        if (icm->safe_mode) {
                tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
                tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
                tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
                return 0;
        }

        return __icm_driver_ready(tb, &tb->security_level);
}

static int icm_suspend(struct tb *tb)
{
        int ret;

        ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
        if (ret)
                tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
                        ret, __func__);

        return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * ICM firmware will send us an updated list of switches after we have
 * sent it the driver ready command. If a switch is not in that list it
 * will be removed when we perform rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
        unsigned int i;

        if (tb_route(sw))
                sw->is_unplugged = true;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain) {
                        port->xdomain->is_unplugged = true;
                        continue;
                }
                if (!port->remote)
                        continue;

                icm_unplug_children(port->remote->sw);
        }
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
        unsigned int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;

                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        continue;
                }

                if (!port->remote)
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                } else {
                        icm_free_unplugged_children(port->remote->sw);
                }
        }
}

static void icm_rescan_work(struct work_struct *work)
{
        struct icm *icm = container_of(work, struct icm, rescan_work.work);
        struct tb *tb = icm_to_tb(icm);

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                icm_free_unplugged_children(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (tb->nhi->going_away)
                return;

        icm_unplug_children(tb->root_switch);

        /*
         * Now all existing children should be resumed, start events
         * from ICM to get updated status.
         */
        __icm_driver_ready(tb, NULL);

        /*
         * We do not get notifications of devices that have been
         * unplugged during suspend, so schedule a rescan to clean
         * them up if needed.
         */
        queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_start(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        if (icm->safe_mode)
                tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
        else
                tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (!tb->root_switch)
                return -ENODEV;

        /*
         * NVM upgrade has not been tested on Apple systems and they
         * don't provide images publicly either. To be on the safe side
         * prevent root switch NVM upgrade on Macs for now.
         */
        tb->root_switch->no_nvm_upgrade = x86_apple_machine;

        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                tb->root_switch = NULL;
        }

        return ret;
}

static void icm_stop(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        cancel_delayed_work(&icm->rescan_work);
        tb_switch_remove(tb->root_switch);
        tb->root_switch = NULL;
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
        return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge and Alpine Ridge */
static const struct tb_cm_ops icm_fr_ops = {
        .driver_ready = icm_driver_ready,
        .start = icm_start,
        .stop = icm_stop,
        .suspend = icm_suspend,
        .complete = icm_complete,
        .handle_event = icm_handle_event,
        .approve_switch = icm_fr_approve_switch,
        .add_switch_key = icm_fr_add_switch_key,
        .challenge_switch_key = icm_fr_challenge_switch_key,
        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
        .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
        .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

struct tb *icm_probe(struct tb_nhi *nhi)
{
        struct icm *icm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(struct icm));
        if (!tb)
                return NULL;

        icm = tb_priv(tb);
        INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
        mutex_init(&icm->request_lock);

        switch (nhi->pdev->device) {
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
                icm->is_supported = icm_fr_is_supported;
                icm->get_route = icm_fr_get_route;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                icm->xdomain_connected = icm_fr_xdomain_connected;
                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
                tb->cm_ops = &icm_fr_ops;
                break;

        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
                icm->is_supported = icm_ar_is_supported;
                icm->get_mode = icm_ar_get_mode;
                icm->get_route = icm_ar_get_route;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                icm->xdomain_connected = icm_fr_xdomain_connected;
                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
                tb->cm_ops = &icm_fr_ops;
                break;
        }

        if (!icm->is_supported || !icm->is_supported(tb)) {
                dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
                tb_domain_put(tb);
                return NULL;
        }

        return tb;
}
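
/*
 * A sketch of how icm_probe() is expected to be used by the NHI driver
 * (an assumption based on the software connection manager fallback in
 * tb.c, not something this file enforces):
 *
 *      tb = icm_probe(nhi);
 *      if (!tb)
 *              tb = tb_probe(nhi);
 *      if (!tb)
 *              return -ENODEV;
 */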