// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_SHORT_TIMEOUT			100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT			1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT			10000	/* ms */
#define XDOMAIN_RETRIES				10
#define XDOMAIN_DEFAULT_MAX_HOPID		15

enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};

static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);
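
/*
 * For example, a path that touches both the global properties and a
 * per-domain property block must nest the locks in this order
 * (illustrative sketch only, not a function in this file):
 *
 *	mutex_lock(&xdomain_lock);
 *	mutex_lock(&xd->lock);
 *	... update xd->local_property_block ...
 *	mutex_unlock(&xd->lock);
 *	mutex_unlock(&xdomain_lock);
 *
 * update_property_block() below follows exactly this pattern.
 */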

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
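
/*
 * A minimal sketch of how a service driver might answer a request it
 * received from the remote domain (struct my_reply is a hypothetical
 * placeholder; only the call itself comes from this file):
 *
 *	struct my_reply reply = { .status = 0 };
 *	int ret;
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 *	if (ret)
 *		dev_warn(&xd->dev, "failed to send reply: %d\n", ret);
 */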

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}
/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
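
/*
 * A minimal sketch of a request/response round trip as a service driver
 * might issue it (struct my_req/my_res are hypothetical placeholders;
 * the PDF types and the 1000 ms timeout mirror what this file itself
 * uses for its discovery messages):
 *
 *	struct my_req req = { ... };
 *	struct my_res res = { };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;	(-EIO, -ETIMEDOUT, ...)
 */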

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

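	/*
	 * The length field carries the payload size in dwords, excluding
	 * the basic XDomain header, and the sequence number is stored in
	 * the SN bits on top of it.
	 */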
	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}

static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}

static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));

	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called, the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
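
/*
 * A minimal sketch of the handler lifecycle as a service driver might
 * use it (the UUID value and my_callback are hypothetical; the fields
 * are the ones checked above and declared in tb.h):
 *
 *	static const uuid_t my_proto_uuid = UUID_INIT(...);
 *
 *	static int my_callback(const void *buf, size_t size, void *data)
 *	{
 *		... handle the incoming package ...
 *		return 0;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 *	...
 *	tb_unregister_protocol_handler(&my_handler);
 */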

static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv on the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
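
/*
 * A minimal sketch of a service driver registration (names are
 * hypothetical; the structure layout follows include/linux/thunderbolt.h):
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = {
 *			.name = "my-service",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	module_driver(my_driver, tb_register_service_driver,
 *		      tb_unregister_service_driver);
 */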

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null-terminated, but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * The USB4 inter-domain spec suggests using 15 as the maximum
	 * HopID if the other end does not announce it in a property.
	 * This is for TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to read remote UUID\n");
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}

static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw, &sls,
					       &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed to request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}

static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	unsigned int width, width_mask;
	struct tb_port *port;
	int ret;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = TB_LINK_WIDTH_SINGLE;
		width_mask = width;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = TB_LINK_WIDTH_DUAL;
		width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_xdomain_downstream_port(xd);

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width on both lane adapters and wait for the
	 * link to transition to bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width_mask);
		return ret;
	}

	port->bonded = width > TB_LINK_WIDTH_SINGLE;
	port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}

static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		}
		/* Give up now */
		dev_err(&xd->dev, "failed to read XDomain properties from %pUb\n",
			xd->remote_uuid);

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that it has changed.
	 */
1420         if (!update) {
1421                 /*
1422                  * Now disable lane 1 if bonding was not enabled. Do
1423                  * this only if bonding was possible at the beginning
1424                  * (that is we are the connection manager and there are
1425                  * two lanes).
1426                  */
1427                 if (xd->bonding_possible) {
1428                         struct tb_port *port;
1429
1430                         port = tb_xdomain_downstream_port(xd);
1431                         if (!port->bonded)
1432                                 tb_port_disable(port->dual_link_port);
1433                 }
1434
1435                 if (device_add(&xd->dev)) {
1436                         dev_err(&xd->dev, "failed to add XDomain device\n");
1437                         return -ENODEV;
1438                 }
1439                 dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
1440                          xd->vendor, xd->device);
1441                 if (xd->vendor_name && xd->device_name)
1442                         dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
1443                                  xd->device_name);
1444
1445                 tb_xdomain_debugfs_init(xd);
1446         } else {
1447                 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
1448         }
1449
1450         enumerate_services(xd);
1451         return 0;
1452
1453 err_free_dir:
1454         tb_property_free_dir(dir);
1455 err_free_block:
1456         kfree(block);
1457         mutex_unlock(&xd->lock);
1458
1459         return ret;
1460 }
1461
1462 static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
1463 {
1464         xd->state = XDOMAIN_STATE_UUID;
1465         xd->state_retries = XDOMAIN_RETRIES;
1466         queue_delayed_work(xd->tb->wq, &xd->state_work,
1467                            msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
1468 }
1469
1470 static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
1471 {
1472         xd->state = XDOMAIN_STATE_LINK_STATUS;
1473         xd->state_retries = XDOMAIN_RETRIES;
1474         queue_delayed_work(xd->tb->wq, &xd->state_work,
1475                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1476 }
1477
1478 static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
1479 {
1480         xd->state = XDOMAIN_STATE_LINK_STATUS2;
1481         xd->state_retries = XDOMAIN_RETRIES;
1482         queue_delayed_work(xd->tb->wq, &xd->state_work,
1483                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1484 }
1485
1486 static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
1487 {
1488         if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
1489                 dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
1490                 xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
1491         } else {
1492                 dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
1493                 xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
1494         }
1495
1496         xd->state_retries = XDOMAIN_RETRIES;
1497         queue_delayed_work(xd->tb->wq, &xd->state_work,
1498                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1499 }
1500
1501 static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
1502 {
1503         xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
1504         xd->state_retries = XDOMAIN_RETRIES;
1505         queue_delayed_work(xd->tb->wq, &xd->state_work,
1506                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1507 }
1508
1509 static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
1510 {
1511         xd->state = XDOMAIN_STATE_PROPERTIES;
1512         xd->state_retries = XDOMAIN_RETRIES;
1513         queue_delayed_work(xd->tb->wq, &xd->state_work,
1514                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1515 }
1516
1517 static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
1518 {
1519         xd->properties_changed_retries = XDOMAIN_RETRIES;
1520         queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1521                            msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
1522 }
1523
1524 static void tb_xdomain_state_work(struct work_struct *work)
1525 {
1526         struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
1527         int ret, state = xd->state;
1528
1529         if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
1530                          state > XDOMAIN_STATE_ERROR))
1531                 return;
1532
1533         dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
1534
1535         switch (state) {
1536         case XDOMAIN_STATE_INIT:
1537                 if (xd->needs_uuid) {
1538                         tb_xdomain_queue_uuid(xd);
1539                 } else {
1540                         tb_xdomain_queue_properties_changed(xd);
1541                         tb_xdomain_queue_properties(xd);
1542                 }
1543                 break;
1544
1545         case XDOMAIN_STATE_UUID:
1546                 ret = tb_xdomain_get_uuid(xd);
1547                 if (ret) {
1548                         if (ret == -EAGAIN)
1549                                 goto retry_state;
1550                         xd->state = XDOMAIN_STATE_ERROR;
1551                 } else {
1552                         tb_xdomain_queue_properties_changed(xd);
1553                         if (xd->bonding_possible)
1554                                 tb_xdomain_queue_link_status(xd);
1555                         else
1556                                 tb_xdomain_queue_properties(xd);
1557                 }
1558                 break;
1559
1560         case XDOMAIN_STATE_LINK_STATUS:
1561                 ret = tb_xdomain_get_link_status(xd);
1562                 if (ret) {
1563                         if (ret == -EAGAIN)
1564                                 goto retry_state;
1565
1566                         /*
1567                          * If any of the lane bonding states fails, we skip
1568                          * bonding completely and continue from reading the
1569                          * properties instead.
1570                          */
1571                         tb_xdomain_queue_properties(xd);
1572                 } else {
1573                         tb_xdomain_queue_bonding(xd);
1574                 }
1575                 break;
1576
1577         case XDOMAIN_STATE_LINK_STATE_CHANGE:
1578                 ret = tb_xdomain_link_state_change(xd, 2);
1579                 if (ret) {
1580                         if (ret == -EAGAIN)
1581                                 goto retry_state;
1582                         tb_xdomain_queue_properties(xd);
1583                 } else {
1584                         tb_xdomain_queue_link_status2(xd);
1585                 }
1586                 break;
1587
1588         case XDOMAIN_STATE_LINK_STATUS2:
1589                 ret = tb_xdomain_get_link_status(xd);
1590                 if (ret) {
1591                         if (ret == -EAGAIN)
1592                                 goto retry_state;
1593                         tb_xdomain_queue_properties(xd);
1594                 } else {
1595                         tb_xdomain_queue_bonding_uuid_low(xd);
1596                 }
1597                 break;
1598
1599         case XDOMAIN_STATE_BONDING_UUID_LOW:
1600                 tb_xdomain_lane_bonding_enable(xd);
1601                 tb_xdomain_queue_properties(xd);
1602                 break;
1603
1604         case XDOMAIN_STATE_BONDING_UUID_HIGH:
1605                 if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
1606                         goto retry_state;
1607                 tb_xdomain_queue_properties(xd);
1608                 break;
1609
1610         case XDOMAIN_STATE_PROPERTIES:
1611                 ret = tb_xdomain_get_properties(xd);
1612                 if (ret) {
1613                         if (ret == -EAGAIN)
1614                                 goto retry_state;
1615                         xd->state = XDOMAIN_STATE_ERROR;
1616                 } else {
1617                         xd->state = XDOMAIN_STATE_ENUMERATED;
1618                 }
1619                 break;
1620
1621         case XDOMAIN_STATE_ENUMERATED:
1622                 tb_xdomain_queue_properties(xd);
1623                 break;
1624
1625         case XDOMAIN_STATE_ERROR:
1626                 break;
1627
1628         default:
1629                 dev_warn(&xd->dev, "unexpected state %d\n", state);
1630                 break;
1631         }
1632
1633         return;
1634
1635 retry_state:
1636         queue_delayed_work(xd->tb->wq, &xd->state_work,
1637                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1638 }
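
/*
 * A typical successful enumeration, assuming bonding is possible and
 * this end has the lower UUID, walks the states as follows (sketch;
 * the exact path depends on the remote end):
 *
 *	INIT -> UUID -> LINK_STATUS -> LINK_STATE_CHANGE ->
 *	LINK_STATUS2 -> BONDING_UUID_LOW -> PROPERTIES -> ENUMERATED
 *
 * The higher-UUID end goes INIT -> UUID -> LINK_STATUS ->
 * BONDING_UUID_HIGH -> PROPERTIES -> ENUMERATED instead. A hard
 * failure lands in ERROR; a transient one (-EAGAIN) re-queues the
 * same state.
 */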
1639
1640 static void tb_xdomain_properties_changed(struct work_struct *work)
1641 {
1642         struct tb_xdomain *xd = container_of(work, typeof(*xd),
1643                                              properties_changed_work.work);
1644         int ret;
1645
1646         dev_dbg(&xd->dev, "sending properties changed notification\n");
1647
1648         ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
1649                                 xd->properties_changed_retries, xd->local_uuid);
1650         if (ret) {
1651                 if (xd->properties_changed_retries-- > 0) {
1652                         dev_dbg(&xd->dev,
1653                                 "failed to send properties changed notification, retrying\n");
1654                         queue_delayed_work(xd->tb->wq,
1655                                            &xd->properties_changed_work,
1656                                            msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1657                         return;
1658                 }
1659                 dev_err(&xd->dev, "failed to send properties changed notification\n");
1660                 return;
1661         }
1661
1662         xd->properties_changed_retries = XDOMAIN_RETRIES;
1663 }
1664
1665 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1666                            char *buf)
1667 {
1668         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1669
1670         return sysfs_emit(buf, "%#x\n", xd->device);
1671 }
1672 static DEVICE_ATTR_RO(device);
1673
1674 static ssize_t
1675 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1676 {
1677         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1678         int ret;
1679
1680         if (mutex_lock_interruptible(&xd->lock))
1681                 return -ERESTARTSYS;
1682         ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
1683         mutex_unlock(&xd->lock);
1684
1685         return ret;
1686 }
1687 static DEVICE_ATTR_RO(device_name);
1688
1689 static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
1690                              char *buf)
1691 {
1692         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1693
1694         return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
1695 }
1696 static DEVICE_ATTR_RO(maxhopid);
1697
1698 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1699                            char *buf)
1700 {
1701         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1702
1703         return sysfs_emit(buf, "%#x\n", xd->vendor);
1704 }
1705 static DEVICE_ATTR_RO(vendor);
1706
1707 static ssize_t
1708 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1709 {
1710         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1711         int ret;
1712
1713         if (mutex_lock_interruptible(&xd->lock))
1714                 return -ERESTARTSYS;
1715         ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
1716         mutex_unlock(&xd->lock);
1717
1718         return ret;
1719 }
1720 static DEVICE_ATTR_RO(vendor_name);
1721
1722 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1723                               char *buf)
1724 {
1725         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1726
1727         return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
1728 }
1729 static DEVICE_ATTR_RO(unique_id);
1730
1731 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1732                           char *buf)
1733 {
1734         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1735
1736         return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
1737 }
1738
1739 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1740 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1741
1742 static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
1743                              char *buf)
1744 {
1745         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1746         unsigned int width;
1747
1748         switch (xd->link_width) {
1749         case TB_LINK_WIDTH_SINGLE:
1750         case TB_LINK_WIDTH_ASYM_RX:
1751                 width = 1;
1752                 break;
1753         case TB_LINK_WIDTH_DUAL:
1754                 width = 2;
1755                 break;
1756         case TB_LINK_WIDTH_ASYM_TX:
1757                 width = 3;
1758                 break;
1759         default:
1760                 WARN_ON_ONCE(1);
1761                 return -EINVAL;
1762         }
1763
1764         return sysfs_emit(buf, "%u\n", width);
1765 }
1766 static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
1767
1768 static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
1769                              char *buf)
1770 {
1771         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1772         unsigned int width;
1773
1774         switch (xd->link_width) {
1775         case TB_LINK_WIDTH_SINGLE:
1776         case TB_LINK_WIDTH_ASYM_TX:
1777                 width = 1;
1778                 break;
1779         case TB_LINK_WIDTH_DUAL:
1780                 width = 2;
1781                 break;
1782         case TB_LINK_WIDTH_ASYM_RX:
1783                 width = 3;
1784                 break;
1785         default:
1786                 WARN_ON_ONCE(1);
1787                 return -EINVAL;
1788         }
1789
1790         return sysfs_emit(buf, "%u\n", width);
1791 }
1792 static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
1793
1794 static struct attribute *xdomain_attrs[] = {
1795         &dev_attr_device.attr,
1796         &dev_attr_device_name.attr,
1797         &dev_attr_maxhopid.attr,
1798         &dev_attr_rx_lanes.attr,
1799         &dev_attr_rx_speed.attr,
1800         &dev_attr_tx_lanes.attr,
1801         &dev_attr_tx_speed.attr,
1802         &dev_attr_unique_id.attr,
1803         &dev_attr_vendor.attr,
1804         &dev_attr_vendor_name.attr,
1805         NULL,
1806 };
1807
1808 static const struct attribute_group xdomain_attr_group = {
1809         .attrs = xdomain_attrs,
1810 };
1811
1812 static const struct attribute_group *xdomain_attr_groups[] = {
1813         &xdomain_attr_group,
1814         NULL,
1815 };
1816
1817 static void tb_xdomain_release(struct device *dev)
1818 {
1819         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1820
1821         put_device(xd->dev.parent);
1822
1823         kfree(xd->local_property_block);
1824         tb_property_free_dir(xd->remote_properties);
1825         ida_destroy(&xd->out_hopids);
1826         ida_destroy(&xd->in_hopids);
1827         ida_destroy(&xd->service_ids);
1828
1829         kfree(xd->local_uuid);
1830         kfree(xd->remote_uuid);
1831         kfree(xd->device_name);
1832         kfree(xd->vendor_name);
1833         kfree(xd);
1834 }
1835
1836 static void start_handshake(struct tb_xdomain *xd)
1837 {
1838         xd->state = XDOMAIN_STATE_INIT;
1839         queue_delayed_work(xd->tb->wq, &xd->state_work,
1840                            msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
1841 }
1842
1843 static void stop_handshake(struct tb_xdomain *xd)
1844 {
1845         cancel_delayed_work_sync(&xd->properties_changed_work);
1846         cancel_delayed_work_sync(&xd->state_work);
1847         xd->properties_changed_retries = 0;
1848         xd->state_retries = 0;
1849 }
1850
1851 static int __maybe_unused tb_xdomain_suspend(struct device *dev)
1852 {
1853         stop_handshake(tb_to_xdomain(dev));
1854         return 0;
1855 }
1856
1857 static int __maybe_unused tb_xdomain_resume(struct device *dev)
1858 {
1859         start_handshake(tb_to_xdomain(dev));
1860         return 0;
1861 }
1862
1863 static const struct dev_pm_ops tb_xdomain_pm_ops = {
1864         SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
1865 };
1866
1867 struct device_type tb_xdomain_type = {
1868         .name = "thunderbolt_xdomain",
1869         .release = tb_xdomain_release,
1870         .pm = &tb_xdomain_pm_ops,
1871 };
1872 EXPORT_SYMBOL_GPL(tb_xdomain_type);
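
/*
 * Example (illustrative sketch): XDomain devices share the bus with
 * switches and services, so bus iteration callbacks use
 * tb_to_xdomain() (which checks for tb_xdomain_type) to pick them
 * out, the same way update_xdomain() does below:
 *
 *	static int handle_dev(struct device *dev, void *data)
 *	{
 *		struct tb_xdomain *xd = tb_to_xdomain(dev);
 *
 *		if (!xd)
 *			return 0;
 *		dev_dbg(dev, "XDomain at route %llx\n", xd->route);
 *		return 0;
 *	}
 */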
1873
1874 /**
1875  * tb_xdomain_alloc() - Allocate new XDomain object
1876  * @tb: Domain where the XDomain belongs
1877  * @parent: Parent device (the switch through which the connection to
1878  *          the other domain is reached)
1879  * @route: Route string used to reach the other domain
1880  * @local_uuid: Our local domain UUID
1881  * @remote_uuid: UUID of the other domain (optional)
1882  *
1883  * Allocates a new XDomain structure and returns a pointer to it. The
1884  * object must be released by calling tb_xdomain_put().
1885  */
1886 struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1887                                     u64 route, const uuid_t *local_uuid,
1888                                     const uuid_t *remote_uuid)
1889 {
1890         struct tb_switch *parent_sw = tb_to_switch(parent);
1891         struct tb_xdomain *xd;
1892         struct tb_port *down;
1893
1894         /* Make sure the downstream domain is accessible */
1895         down = tb_port_at(route, parent_sw);
1896         tb_port_unlock(down);
1897
1898         xd = kzalloc(sizeof(*xd), GFP_KERNEL);
1899         if (!xd)
1900                 return NULL;
1901
1902         xd->tb = tb;
1903         xd->route = route;
1904         xd->local_max_hopid = down->config.max_in_hop_id;
1905         ida_init(&xd->service_ids);
1906         ida_init(&xd->in_hopids);
1907         ida_init(&xd->out_hopids);
1908         mutex_init(&xd->lock);
1909         INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
1910         INIT_DELAYED_WORK(&xd->properties_changed_work,
1911                           tb_xdomain_properties_changed);
1912
1913         xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
1914         if (!xd->local_uuid)
1915                 goto err_free;
1916
1917         if (remote_uuid) {
1918                 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
1919                                           GFP_KERNEL);
1920                 if (!xd->remote_uuid)
1921                         goto err_free_local_uuid;
1922         } else {
1923                 xd->needs_uuid = true;
1924                 xd->bonding_possible = !!down->dual_link_port;
1925         }
1926
1927         device_initialize(&xd->dev);
1928         xd->dev.parent = get_device(parent);
1929         xd->dev.bus = &tb_bus_type;
1930         xd->dev.type = &tb_xdomain_type;
1931         xd->dev.groups = xdomain_attr_groups;
1932         dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
1933
1934         dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
1935         if (remote_uuid)
1936                 dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
1937
1938         /*
1939          * This keeps the DMA powered on as long as we have an active
1940          * connection to another host.
1941          */
1942         pm_runtime_set_active(&xd->dev);
1943         pm_runtime_get_noresume(&xd->dev);
1944         pm_runtime_enable(&xd->dev);
1945
1946         return xd;
1947
1948 err_free_local_uuid:
1949         kfree(xd->local_uuid);
1950 err_free:
1951         kfree(xd);
1952
1953         return NULL;
1954 }
1955
1956 /**
1957  * tb_xdomain_add() - Add XDomain to the bus
1958  * @xd: XDomain to add
1959  *
1960  * This function starts the XDomain discovery protocol handshake and
1961  * eventually adds the XDomain to the bus. After calling this function
1962  * the caller needs to call tb_xdomain_remove() in order to remove and
1963  * release the object, regardless of whether the handshake succeeded.
1964  */
1965 void tb_xdomain_add(struct tb_xdomain *xd)
1966 {
1967         /* Start exchanging properties with the other host */
1968         start_handshake(xd);
1969 }
1970
1971 static int unregister_service(struct device *dev, void *data)
1972 {
1973         device_unregister(dev);
1974         return 0;
1975 }
1976
1977 /**
1978  * tb_xdomain_remove() - Remove XDomain from the bus
1979  * @xd: XDomain to remove
1980  *
1981  * This will stop all ongoing configuration work and remove the XDomain
1982  * along with any services from the bus. When the last reference to @xd
1983  * is released the object will be released as well.
1984  */
1985 void tb_xdomain_remove(struct tb_xdomain *xd)
1986 {
1987         tb_xdomain_debugfs_remove(xd);
1988
1989         stop_handshake(xd);
1990
1991         device_for_each_child_reverse(&xd->dev, xd, unregister_service);
1992
1993         /*
1994          * Undo runtime PM here explicitly because it is possible that
1995          * the XDomain was never added to the bus and thus device_del()
1996          * is not called for it (device_del() would handle this otherwise).
1997          */
1998         pm_runtime_disable(&xd->dev);
1999         pm_runtime_put_noidle(&xd->dev);
2000         pm_runtime_set_suspended(&xd->dev);
2001
2002         if (!device_is_registered(&xd->dev)) {
2003                 put_device(&xd->dev);
2004         } else {
2005                 dev_info(&xd->dev, "host disconnected\n");
2006                 device_unregister(&xd->dev);
2007         }
2008 }
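
/*
 * Example (illustrative sketch of how a connection manager might
 * drive the lifecycle; error handling trimmed, and the port/switch
 * variables are assumed from the surrounding code):
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, NULL);
 *	if (xd) {
 *		port->xdomain = xd;
 *		tb_xdomain_add(xd);
 *	}
 *
 *	... and on disconnect ...
 *
 *	tb_xdomain_remove(port->xdomain);
 *	port->xdomain = NULL;
 */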
2009
2010 /**
2011  * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
2012  * @xd: XDomain connection
2013  *
2014  * Lane bonding is disabled by default for XDomains. This function tries
2015  * to enable bonding by first enabling the dual-link port and waiting
2016  * for the CL0 state.
2017  *
2018  * Return: %0 in case of success and negative errno in case of error.
2019  */
2020 int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
2021 {
2022         unsigned int width_mask;
2023         struct tb_port *port;
2024         int ret;
2025
2026         port = tb_xdomain_downstream_port(xd);
2027         if (!port->dual_link_port)
2028                 return -ENODEV;
2029
2030         ret = tb_port_enable(port->dual_link_port);
2031         if (ret)
2032                 return ret;
2033
2034         ret = tb_wait_for_port(port->dual_link_port, true);
2035         if (ret < 0)
2036                 return ret;
2037         if (!ret)
2038                 return -ENOTCONN;
2039
2040         ret = tb_port_lane_bonding_enable(port);
2041         if (ret) {
2042                 tb_port_warn(port, "failed to enable lane bonding\n");
2043                 return ret;
2044         }
2045
2046         /* Any of these widths means the lanes are bonded */
2047         width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
2048                      TB_LINK_WIDTH_ASYM_RX;
2049
2050         ret = tb_port_wait_for_link_width(port, width_mask,
2051                                           XDOMAIN_BONDING_TIMEOUT);
2052         if (ret) {
2053                 tb_port_warn(port, "timeout enabling lane bonding\n");
2054                 return ret;
2055         }
2056
2057         tb_port_update_credits(port);
2058         tb_xdomain_update_link_attributes(xd);
2059
2060         dev_dbg(&xd->dev, "lane bonding enabled\n");
2061         return 0;
2062 }
2063 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
2064
2065 /**
2066  * tb_xdomain_lane_bonding_disable() - Disable lane bonding
2067  * @xd: XDomain connection
2068  *
2069  * Lane bonding is disabled by default for XDomains. If bonding has been
2070  * enabled, this function can be used to disable it.
2071  */
2072 void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
2073 {
2074         struct tb_port *port;
2075
2076         port = tb_xdomain_downstream_port(xd);
2077         if (port->dual_link_port) {
2078                 int ret;
2079
2080                 tb_port_lane_bonding_disable(port);
2081                 ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
2082                 if (ret == -ETIMEDOUT)
2083                         tb_port_warn(port, "timeout disabling lane bonding\n");
2084                 tb_port_disable(port->dual_link_port);
2085                 tb_port_update_credits(port);
2086                 tb_xdomain_update_link_attributes(xd);
2087
2088                 dev_dbg(&xd->dev, "lane bonding disabled\n");
2089         }
2090 }
2091 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
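
/*
 * Example (illustrative sketch): a service driver that wants the
 * extra bandwidth can bond the lanes while its DMA tunnel is up and
 * undo it on teardown:
 *
 *	if (tb_xdomain_lane_bonding_enable(xd))
 *		dev_warn(&xd->dev, "bonding failed, using a single lane\n");
 *	...
 *	tb_xdomain_lane_bonding_disable(xd);
 */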
2092
2093 /**
2094  * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
2095  * @xd: XDomain connection
2096  * @hopid: Preferred HopID or %-1 for next available
2097  *
2098  * Returns allocated HopID or negative errno. Specifically returns
2099  * %-ENOSPC if there are no more available HopIDs. Returned HopID is
2100  * guaranteed to be within range supported by the input lane adapter.
2101  * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
2102  */
2103 int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
2104 {
2105         if (hopid < 0)
2106                 hopid = TB_PATH_MIN_HOPID;
2107         if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
2108                 return -EINVAL;
2109
2110         return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
2111                                GFP_KERNEL);
2112 }
2113 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
2114
2115 /**
2116  * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
2117  * @xd: XDomain connection
2118  * @hopid: Preferred HopID or %-1 for next available
2119  *
2120  * Returns allocated HopID or negative errno. Specifically returns
2121  * %-ENOSPC if there are no more available HopIDs. Returned HopID is
2122  * guaranteed to be within range supported by the output lane adapter.
2123  * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
2124  */
2125 int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
2126 {
2127         if (hopid < 0)
2128                 hopid = TB_PATH_MIN_HOPID;
2129         if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
2130                 return -EINVAL;
2131
2132         return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
2133                                GFP_KERNEL);
2134 }
2135 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
2136
2137 /**
2138  * tb_xdomain_release_in_hopid() - Release input HopID
2139  * @xd: XDomain connection
2140  * @hopid: HopID to release
2141  */
2142 void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
2143 {
2144         ida_free(&xd->in_hopids, hopid);
2145 }
2146 EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
2147
2148 /**
2149  * tb_xdomain_release_out_hopid() - Release output HopID
2150  * @xd: XDomain connection
2151  * @hopid: HopID to release
2152  */
2153 void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
2154 {
2155         ida_free(&xd->out_hopids, hopid);
2156 }
2157 EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
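
/*
 * Example (illustrative sketch): a service driver typically reserves
 * an input/output HopID pair for a DMA tunnel and releases both on
 * teardown:
 *
 *	int in, out;
 *
 *	in = tb_xdomain_alloc_in_hopid(xd, -1);
 *	if (in < 0)
 *		return in;
 *	out = tb_xdomain_alloc_out_hopid(xd, -1);
 *	if (out < 0) {
 *		tb_xdomain_release_in_hopid(xd, in);
 *		return out;
 *	}
 *	...
 *	tb_xdomain_release_out_hopid(xd, out);
 *	tb_xdomain_release_in_hopid(xd, in);
 */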
2158
2159 /**
2160  * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
2161  * @xd: XDomain connection
2162  * @transmit_path: HopID we are using to send out packets
2163  * @transmit_ring: DMA ring used to send out packets
2164  * @receive_path: HopID the other end is using to send packets to us
2165  * @receive_ring: DMA ring used to receive packets from @receive_path
2166  *
2167  * The function enables DMA paths accordingly so that after successful
2168  * The function enables the DMA paths so that after a successful return
2169  * the caller can send and receive packets using the high-speed DMA
2170  * path. If a transmit or receive path is not needed, pass %-1 for those
2171  *
2172  * Return: %0 in case of success and negative errno in case of error
2173  */
2174 int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
2175                             int transmit_ring, int receive_path,
2176                             int receive_ring)
2177 {
2178         return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
2179                                                transmit_ring, receive_path,
2180                                                receive_ring);
2181 }
2182 EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
2183
2184 /**
2185  * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
2186  * @xd: XDomain connection
2187  * @transmit_path: HopID we are using to send out packets
2188  * @transmit_ring: DMA ring used to send out packets
2189  * @receive_path: HopID the other end is using to send packets to us
2190  * @receive_ring: DMA ring used to receive packets from @receive_path
2191  *
2192  * This does the opposite of tb_xdomain_enable_paths(). After call to
2193  * this the caller is not expected to use the rings anymore. Passing %-1
2194  * This does the opposite of tb_xdomain_enable_paths(). After a call to
2195  * this function the caller is not expected to use the rings anymore.
2196  * Passing %-1 as a path/ring parameter means "don't care". Normally the
2197  * callers should pass the same values here as they did when the paths
2198  * were enabled.
2198  */
2199 int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
2200                              int transmit_ring, int receive_path,
2201                              int receive_ring)
2202 {
2203         return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
2204                                                   transmit_ring, receive_path,
2205                                                   receive_ring);
2206 }
2207 EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
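
/*
 * Example (illustrative sketch): once the HopIDs and rings are set up
 * the paths are enabled, and later disabled with the same parameters:
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		goto err_free_rings;
 *	...
 *	tb_xdomain_disable_paths(xd, transmit_path, transmit_ring,
 *				 receive_path, receive_ring);
 */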
2208
2209 struct tb_xdomain_lookup {
2210         const uuid_t *uuid;
2211         u8 link;
2212         u8 depth;
2213         u64 route;
2214 };
2215
2216 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
2217         const struct tb_xdomain_lookup *lookup)
2218 {
2219         struct tb_port *port;
2220
2221         tb_switch_for_each_port(sw, port) {
2222                 struct tb_xdomain *xd;
2223
2224                 if (port->xdomain) {
2225                         xd = port->xdomain;
2226
2227                         if (lookup->uuid) {
2228                                 if (xd->remote_uuid &&
2229                                     uuid_equal(xd->remote_uuid, lookup->uuid))
2230                                         return xd;
2231                         } else {
2232                                 if (lookup->link && lookup->link == xd->link &&
2233                                     lookup->depth == xd->depth)
2234                                         return xd;
2235                                 if (lookup->route && lookup->route == xd->route)
2236                                         return xd;
2237                         }
2238                 } else if (tb_port_has_remote(port)) {
2239                         xd = switch_find_xdomain(port->remote->sw, lookup);
2240                         if (xd)
2241                                 return xd;
2242                 }
2243         }
2244
2245         return NULL;
2246 }
2247
2248 /**
2249  * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
2250  * @tb: Domain the XDomain belongs to
2251  * @uuid: UUID to look for
2252  *
2253  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2254  * The returned XDomain will have its reference count increased so the
2255  * caller needs to call tb_xdomain_put() when it is done with the
2256  * object.
2257  *
2258  * This will find all XDomains including the ones that are not yet added
2259  * to the bus (handshake is still in progress).
2260  *
2261  * The caller needs to hold @tb->lock.
2262  */
2263 struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2264 {
2265         struct tb_xdomain_lookup lookup;
2266         struct tb_xdomain *xd;
2267
2268         memset(&lookup, 0, sizeof(lookup));
2269         lookup.uuid = uuid;
2270
2271         xd = switch_find_xdomain(tb->root_switch, &lookup);
2272         return tb_xdomain_get(xd);
2273 }
2274 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
2275
2276 /**
2277  * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
2278  * @tb: Domain the XDomain belongs to
2279  * @link: Root switch link number
2280  * @depth: Depth in the link
2281  *
2282  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2283  * The returned XDomain will have its reference count increased so the
2284  * caller needs to call tb_xdomain_put() when it is done with the
2285  * object.
2286  *
2287  * This will find all XDomains including the ones that are not yet added
2288  * to the bus (handshake is still in progress).
2289  *
2290  * The caller needs to hold @tb->lock.
2291  */
2292 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
2293                                                  u8 depth)
2294 {
2295         struct tb_xdomain_lookup lookup;
2296         struct tb_xdomain *xd;
2297
2298         memset(&lookup, 0, sizeof(lookup));
2299         lookup.link = link;
2300         lookup.depth = depth;
2301
2302         xd = switch_find_xdomain(tb->root_switch, &lookup);
2303         return tb_xdomain_get(xd);
2304 }
2305
2306 /**
2307  * tb_xdomain_find_by_route() - Find an XDomain by route string
2308  * @tb: Domain the XDomain belongs to
2309  * @route: XDomain route string
2310  *
2311  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2312  * The returned XDomain will have its reference count increased so the
2313  * caller needs to call tb_xdomain_put() when it is done with the
2314  * object.
2315  *
2316  * This will find all XDomains including the ones that are not yet added
2317  * to the bus (handshake is still in progress).
2318  *
2319  * The caller needs to hold @tb->lock.
2320  */
2321 struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
2322 {
2323         struct tb_xdomain_lookup lookup;
2324         struct tb_xdomain *xd;
2325
2326         memset(&lookup, 0, sizeof(lookup));
2327         lookup.route = route;
2328
2329         xd = switch_find_xdomain(tb->root_switch, &lookup);
2330         return tb_xdomain_get(xd);
2331 }
2332 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
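
/*
 * Example (illustrative sketch): all the lookups above follow the
 * same pattern; hold @tb->lock around the call and drop the reference
 * when done:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_route(tb, route);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */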
2333
2334 bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
2335                                const void *buf, size_t size)
2336 {
2337         const struct tb_protocol_handler *handler, *tmp;
2338         const struct tb_xdp_header *hdr = buf;
2339         unsigned int length;
2340         int ret = 0;
2341
2342         /* We expect the packet to be at least the size of the header */
2343         length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
2344         if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
2345                 return true;
2346         if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
2347                 return true;
2348
2349         /*
2350          * Handle XDomain discovery protocol packets directly here. For
2351          * other protocols (based on their UUID) we call registered
2352          * handlers in turn.
2353          */
2354         if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
2355                 if (type == TB_CFG_PKG_XDOMAIN_REQ)
2356                         return tb_xdp_schedule_request(tb, hdr, size);
2357                 return false;
2358         }
2359
2360         mutex_lock(&xdomain_lock);
2361         list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
2362                 if (!uuid_equal(&hdr->uuid, handler->uuid))
2363                         continue;
2364
2365                 mutex_unlock(&xdomain_lock);
2366                 ret = handler->callback(buf, size, handler->data);
2367                 mutex_lock(&xdomain_lock);
2368
2369                 if (ret)
2370                         break;
2371         }
2372         mutex_unlock(&xdomain_lock);
2373
2374         return ret > 0;
2375 }
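
/*
 * Example (illustrative sketch, my_* names hypothetical): a service
 * driver implementing its own XDomain protocol registers a handler
 * keyed by the protocol UUID using tb_register_protocol_handler().
 * As can be seen above, the callback runs without xdomain_lock held,
 * and returning a positive value stops further handlers and reports
 * the packet as handled:
 *
 *	static int my_proto_callback(const void *buf, size_t size,
 *				     void *data)
 *	{
 *		...
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */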
2376
2377 static int update_xdomain(struct device *dev, void *data)
2378 {
2379         struct tb_xdomain *xd;
2380
2381         xd = tb_to_xdomain(dev);
2382         if (xd) {
2383                 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
2384                                    msecs_to_jiffies(50));
2385         }
2386
2387         return 0;
2388 }
2389
2390 static void update_all_xdomains(void)
2391 {
2392         bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
2393 }
2394
2395 static bool remove_directory(const char *key, const struct tb_property_dir *dir)
2396 {
2397         struct tb_property *p;
2398
2399         p = tb_property_find(xdomain_property_dir, key,
2400                              TB_PROPERTY_TYPE_DIRECTORY);
2401         if (p && p->value.dir == dir) {
2402                 tb_property_remove(p);
2403                 return true;
2404         }
2405         return false;
2406 }
2407
2408 /**
2409  * tb_register_property_dir() - Register property directory to the host
2410  * @key: Key (name) of the directory to add
2411  * @dir: Directory to add
2412  *
2413  * Service drivers can use this function to add a new property directory
2414  * to the properties this host exposes. The other connected hosts are
2415  * notified so they can re-read properties of this host if they are
2416  * interested.
2417  *
2418  * Return: %0 on success and negative errno on failure
2419  */
2420 int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
2421 {
2422         int ret;
2423
2424         if (WARN_ON(!xdomain_property_dir))
2425                 return -EAGAIN;
2426
2427         if (!key || strlen(key) > 8)
2428                 return -EINVAL;
2429
2430         mutex_lock(&xdomain_lock);
2431         if (tb_property_find(xdomain_property_dir, key,
2432                              TB_PROPERTY_TYPE_DIRECTORY)) {
2433                 ret = -EEXIST;
2434                 goto err_unlock;
2435         }
2436
2437         ret = tb_property_add_dir(xdomain_property_dir, key, dir);
2438         if (ret)
2439                 goto err_unlock;
2440
2441         xdomain_property_block_gen++;
2442
2443         mutex_unlock(&xdomain_lock);
2444         update_all_xdomains();
2445         return 0;
2446
2447 err_unlock:
2448         mutex_unlock(&xdomain_lock);
2449         return ret;
2450 }
2451 EXPORT_SYMBOL_GPL(tb_register_property_dir);
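
/*
 * Example (illustrative sketch, modeled after how a network service
 * could advertise itself; the key and contents are hypothetical):
 *
 *	dir = tb_property_create_dir(&my_service_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	ret = tb_register_property_dir("network", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 *	...
 *	tb_unregister_property_dir("network", dir);
 *	tb_property_free_dir(dir);
 */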
2452
2453 /**
2454  * tb_unregister_property_dir() - Removes property directory from host
2455  * @key: Key (name) of the directory
2456  * @dir: Directory to remove
2457  *
2458  * This will remove the existing directory from this host and notify the
2459  * connected hosts about the change.
2460  */
2461 void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
2462 {
2463         mutex_lock(&xdomain_lock);
2464         if (remove_directory(key, dir))
2465                 xdomain_property_block_gen++;
2466         mutex_unlock(&xdomain_lock);
2467
2468         update_all_xdomains();
2472 }
2473 EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
2474
2475 int tb_xdomain_init(void)
2476 {
2477         xdomain_property_dir = tb_property_create_dir(NULL);
2478         if (!xdomain_property_dir)
2479                 return -ENOMEM;
2480
2481         /*
2482          * Initialize the standard set of properties without any service
2483          * directories. Those are added by the service drivers themselves
2484          * when they are loaded.
2485          *
2486          * The rest of the properties are filled in dynamically based on
2487          * these when the P2P connection is made.
2488          */
2489         tb_property_add_immediate(xdomain_property_dir, "vendorid",
2490                                   PCI_VENDOR_ID_INTEL);
2491         tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
2492         tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
2493         tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
2494
2495         xdomain_property_block_gen = get_random_u32();
2496         return 0;
2497 }
2498
2499 void tb_xdomain_exit(void)
2500 {
2501         tb_property_free_dir(xdomain_property_dir);
2502 }