// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

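/*
 * A minimal sketch of how the cache above is used over an NVM upgrade
 * (illustrative only; the real callers are further down in this file):
 *
 *	nvm_set_auth_status(sw, status);   // device reported a failure
 *	// ... switch is power cycled and re-enumerated ...
 *	nvm_get_auth_status(sw, &status);  // sysfs still reports the failure
 *	nvm_clear_auth_status(sw);         // cleared when the user retries
 */
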
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

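/*
 * For reference, the image layout the checks above assume
 * (reconstructed from the code, not an authoritative format
 * specification):
 *
 *	offset 0:                    FARB pointer, low 24 bits = hdr_size
 *	offset NVM_CSS:              CSS headers, written separately on
 *	                             generation < 3 hardware
 *	offset hdr_size:             digital section (4k aligned), first
 *	                             u16 is the section size
 *	offset hdr_size + NVM_DEVID: u16 device ID, must match the
 *	                             switch config space
 */
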
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);
	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

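/*
 * A worked example of the size math above, assuming the low three bits
 * of the flash size register read back as 2 on a generation 3 device:
 *
 *	nvm_size = (SZ_1M << 2) / 8;        // 4 Mbit of flash -> 512 KB
 *	nvm_size = (nvm_size - SZ_16K) / 2; // minus header, two halves
 *	                                    // -> 248 KB per NVM image
 */
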
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: An enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: An error code on failure; 0 if the port is not connected or
 * failed to reach state TB_PORT_UP within one second; 1 if the port is
 * connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 60;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_info(port, "clearing counter %d\n", counter);
	/* Each counter occupies three dwords in the counters config space */
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/* HopIDs 0-7 are reserved */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

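/*
 * A minimal usage sketch of the allocate/release pairing (illustrative
 * only): passing -1 for both limits accepts any HopID the port
 * supports above the reserved range:
 *
 *	int hopid = tb_port_alloc_out_hopid(port, -1, -1);
 *
 *	if (hopid < 0)
 *		return hopid;
 *	// ... program a path that uses @hopid ...
 *	tb_port_release_out_hopid(port, hopid);
 */
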
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (start->sw->config.depth < end->sw->config.depth) {
		if (prev->remote &&
		    prev->remote->sw->config.depth > prev->sw->config.depth)
			next = prev->remote;
		else
			next = tb_port_at(tb_route(end->sw), prev->sw);
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next;
}

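/*
 * A minimal sketch of walking a whole path with this helper
 * (illustrative only). The first call, with @prev == NULL, returns
 * @src itself and the walk ends with NULL once @dst has been returned:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		tb_port_dbg(p, "is on the path\n");
 */
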
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
		return false;

	return !!(data & TB_PCI_EN);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? TB_PCI_EN : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
}

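/*
 * A minimal sketch of how a connection manager might flip both ends of
 * a PCIe tunnel with this helper (illustrative only; the real callers
 * live in the path/tunnel code):
 *
 *	ret = tb_pci_port_enable(down, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = tb_pci_port_enable(up, true);
 *	if (ret)
 *		tb_pci_port_enable(down, false);
 */
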
/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       sw->vendor_id, sw->device_id, sw->revision,
	       sw->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       sw->upstream_port_number, sw->depth,
	       (((u64) sw->route_hi) << 32) | sw->route_lo,
	       sw->enabled, sw->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	/*
	 * Make sure there is no PCIe rescan ongoing when a new PCIe
	 * tunnel is created. Otherwise the PCIe rescan code might find
	 * the new tunnel too early.
	 */
	pci_lock_rescan_remove();
	pm_runtime_get_sync(&sw->dev);

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	pci_unlock_rescan_remove();

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

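/*
 * Example from userspace (illustrative; the exact sysfs path depends
 * on the topology, see Documentation/admin-guide/thunderbolt.rst):
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * Writing 1 approves the switch, using the stored key if one exists;
 * writing 2 runs the key challenge instead.
 */
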
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

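/*
 * Example from userspace for secure mode (illustrative): a key is
 * generated and stored on first connect, then used to challenge the
 * device on later connects:
 *
 *	# key=$(openssl rand -hex 32)
 *	# echo $key > /sys/bus/thunderbolt/devices/0-1/key
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * On subsequent connects writing 2 to "authorized" challenges the
 * device against the stored key instead of approving it outright.
 */
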
static void nvm_authenticate_start(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		pm_runtime_get_sync(&sw->dev);
		ret = nvm_validate_and_write(sw);
		if (ret) {
			pm_runtime_mark_last_busy(&sw->dev);
			pm_runtime_put_autosuspend(&sw->dev);
			goto exit_unlock;
		}

		sw->nvm->authenticating = true;

		if (!tb_route(sw)) {
			/*
			 * Keep root port from suspending as long as the
			 * NVM upgrade process is running.
			 */
			nvm_authenticate_start(sw);
			ret = nvm_authenticate_host(sw);
			if (ret)
				nvm_authenticate_complete(sw);
		} else {
			ret = nvm_authenticate_device(sw);
		}
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_put_autosuspend(&sw->dev);
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

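/*
 * Example of the full NVM upgrade flow from userspace (illustrative;
 * the image name and device path are placeholders, see
 * Documentation/admin-guide/thunderbolt.rst):
 *
 *	# dd if=firmware.bin of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 *
 * A non-zero value read back from nvm_authenticate afterwards is the
 * failure status cached via nvm_set_auth_status().
 */
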
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int i;

	dma_port_free(sw->dma_port);

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!sw->ports[i].disabled) {
			ida_destroy(&sw->ports[i].in_hopids);
			ida_destroy(&sw->ports[i].out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, cap, depth;

	/* Make sure we do not exceed maximum topology limit */
	depth = tb_route_length(route);
	if (depth > TB_SWITCH_MAX_DEPTH)
		return NULL;

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0)
		sw->cap_lc = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
	       route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	ret = tb_lc_configure_link(sw);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	ret = tb_lc_read_uuid(sw, uuid);
	if (ret) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}

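/*
 * A minimal sketch of the three-step sequence a connection manager
 * uses to bring up a switch (illustrative only; error handling
 * shortened):
 *
 *	sw = tb_switch_alloc(tb, &parent->dev, route);
 *	if (!sw)
 *		return -ENOMEM;
 *
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 */
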
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i])) {
			tb_switch_remove(sw->ports[i].remote->sw);
			sw->ports[i].remote = NULL;
		} else if (sw->ports[i].xdomain) {
			tb_xdomain_remove(sw->ports[i].xdomain);
			sw->ports[i].xdomain = NULL;
		}
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);
	tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}

int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_lc_configure_link(sw);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (!tb_port_has_remote(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0
		    || tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_switch_suspend(sw->ports[i].remote->sw);
	}

	tb_lc_set_sleep(sw);
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

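/*
 * All three lookup helpers above return the switch with its reference
 * count raised, so a typical caller pairs them with tb_switch_put()
 * (illustrative only):
 *
 *	struct tb_switch *sw = tb_switch_find_by_route(tb, route);
 *
 *	if (sw) {
 *		// ... use the switch ...
 *		tb_switch_put(sw);
 *	}
 */
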
void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}