// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"

/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information, which means some drivers will get probed before the
 * resources they depend on are available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook.
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or the (struct device*)->p->deferred_probe pointers are manipulated.
 */
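
/*
 * Illustrative sketch (not from the original file): a hypothetical driver
 * probe that propagates -EPROBE_DEFER when a resource it needs is not ready
 * yet.  The "foo" names and the "enable" GPIO are made up for the example;
 * the PTR_ERR() value returned below may be -EPROBE_DEFER, which lands the
 * device on the pending list described above.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct gpio_desc *enable;
 *
 *              enable = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
 *              if (IS_ERR(enable))
 *                      return PTR_ERR(enable);
 *
 *              return 0;
 *      }
 */
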
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool initcalls_done;

/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
 * to prohibit probing of devices as it could be unsafe.
 * Once defer_all_probes is true, all driver probes will be forcibly deferred.
 */
static bool defer_all_probes;

/*
 * For initcall_debug, show the deferred probes executed in late_initcall
 * processing.
 */
static void deferred_probe_debug(struct device *dev)
{
        ktime_t calltime, delta, rettime;
        unsigned long long duration;

        printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
               task_pid_nr(current));
        calltime = ktime_get();
        bus_probe_device(dev);
        rettime = ktime_get();
        delta = ktime_sub(rettime, calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
        printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
               dev_name(dev), duration);
}

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
        struct device *dev;
        struct device_private *private;
        /*
         * This block processes every device in the deferred 'active' list.
         * Each device is removed from the active list and passed to
         * bus_probe_device() to re-attempt the probe.  The loop continues
         * until every device in the active list is removed and retried.
         *
         * Note: Once the device is removed from the list and the mutex is
         * released, it is possible for the device to get freed by another
         * thread and cause an illegal pointer dereference.  This code uses
         * get/put_device() to ensure the device structure cannot disappear
         * from under our feet.
         */
        mutex_lock(&deferred_probe_mutex);
        while (!list_empty(&deferred_probe_active_list)) {
                private = list_first_entry(&deferred_probe_active_list,
                                        typeof(*dev->p), deferred_probe);
                dev = private->device;
                list_del_init(&private->deferred_probe);

                get_device(dev);

                /*
                 * Drop the mutex while probing each device; the probe path may
                 * manipulate the deferred list.
                 */
                mutex_unlock(&deferred_probe_mutex);

                /*
                 * Force the device to the end of the dpm_list since
                 * the PM code assumes that the order we add things to
                 * the list is a good order for suspend but deferred
                 * probe makes that very unsafe.
                 */
                device_pm_move_to_tail(dev);

                dev_dbg(dev, "Retrying from deferred list\n");
                if (initcall_debug && !initcalls_done)
                        deferred_probe_debug(dev);
                else
                        bus_probe_device(dev);

                mutex_lock(&deferred_probe_mutex);

                put_device(dev);
        }
        mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
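
/*
 * Illustrative sketch (not from the original file) of the get/put_device()
 * pattern used in deferred_probe_work_func() above: take a reference before
 * dropping the lock that protects the list, do the work, then re-acquire the
 * lock and drop the reference.  "some_lock" and do_work() are hypothetical;
 * after put_device() the device may be freed and must not be touched.
 *
 *      get_device(dev);
 *      mutex_unlock(&some_lock);
 *      do_work(dev);
 *      mutex_lock(&some_lock);
 *      put_device(dev);
 */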

static void driver_deferred_probe_add(struct device *dev)
{
        mutex_lock(&deferred_probe_mutex);
        if (list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Added to deferred list\n");
                list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
        }
        mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
        mutex_lock(&deferred_probe_mutex);
        if (!list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Removed from deferred list\n");
                list_del_init(&dev->p->deferred_probe);
        }
        mutex_unlock(&deferred_probe_mutex);
}

static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note that there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
        if (!driver_deferred_probe_enable)
                return;

        /*
         * A successful probe means that all the devices in the pending list
         * should be triggered to be reprobed.  Move all the deferred devices
         * into the active list so they can be retried by the workqueue.
         */
        mutex_lock(&deferred_probe_mutex);
        atomic_inc(&deferred_trigger_count);
        list_splice_tail_init(&deferred_probe_pending_list,
                              &deferred_probe_active_list);
        mutex_unlock(&deferred_probe_mutex);

        /*
         * Kick the re-probe thread.  It may already be scheduled, but it is
         * safe to kick it again.
         */
        schedule_work(&deferred_probe_work);
}

/**
 * device_block_probing() - Block/defer device's probes
 *
 *      It will disable probing of devices and defer their probes instead.
 */
void device_block_probing(void)
{
        defer_all_probes = true;
        /* sync with probes to avoid races. */
        wait_for_device_probe();
}

/**
 * device_unblock_probing() - Unblock/enable device's probes
 *
 *      It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */
void device_unblock_probing(void)
{
        defer_all_probes = false;
        driver_deferred_probe_trigger();
}

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
        driver_deferred_probe_enable = true;
        driver_deferred_probe_trigger();
        /* Sort as many dependencies as possible before exiting initcalls */
        flush_work(&deferred_probe_work);
        initcalls_done = true;
        return 0;
}
late_initcall(deferred_probe_initcall);

/**
 * device_is_bound() - Check if device is bound to a driver
 * @dev: device to check
 *
 * Returns true if the passed device has already finished probing successfully
 * against a driver.
 *
 * This function must be called with the device lock held.
 */
bool device_is_bound(struct device *dev)
{
        return dev->p && klist_node_attached(&dev->p->knode_driver);
}
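
/*
 * Illustrative sketch (not from the original file): since device_is_bound()
 * must be called with the device lock held, a caller typically wraps it as
 * below.  The dev_info() message is made up for the example.
 *
 *      device_lock(dev);
 *      if (device_is_bound(dev))
 *              dev_info(dev, "already bound to %s\n", dev->driver->name);
 *      device_unlock(dev);
 */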

static void driver_bound(struct device *dev)
{
        if (device_is_bound(dev)) {
                printk(KERN_WARNING "%s: device %s already bound\n",
                        __func__, kobject_name(&dev->kobj));
                return;
        }

        pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
                 __func__, dev_name(dev));

        klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
        device_links_driver_bound(dev);

        device_pm_check_callbacks(dev);

        /*
         * Make sure the device is no longer in one of the deferred lists and
         * kick off retrying all pending devices.
         */
        driver_deferred_probe_del(dev);
        driver_deferred_probe_trigger();

        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_BOUND_DRIVER, dev);

        kobject_uevent(&dev->kobj, KOBJ_BIND);
}

static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        device_lock(dev);
        dev->driver->coredump(dev);
        device_unlock(dev);

        return count;
}
static DEVICE_ATTR_WO(coredump);
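
/*
 * Illustrative sketch (not from the original file): a driver opts into the
 * "coredump" attribute created by driver_sysfs_add() below by filling in the
 * coredump() callback of its struct device_driver.  The "foo" names are made
 * up; writing anything to the device's coredump file in sysfs then invokes
 * the callback under the device lock, as coredump_store() above shows.
 *
 *      static void foo_coredump(struct device *dev)
 *      {
 *              foo_snapshot_registers(dev_get_drvdata(dev));
 *      }
 *
 *      static struct platform_driver foo_driver = {
 *              .probe  = foo_probe,
 *              .driver = {
 *                      .name     = "foo",
 *                      .coredump = foo_coredump,
 *              },
 *      };
 */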

static int driver_sysfs_add(struct device *dev)
{
        int ret;

        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_BIND_DRIVER, dev);

        ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
                                kobject_name(&dev->kobj));
        if (ret)
                goto fail;

        ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
                                "driver");
        if (ret)
                goto rm_dev;

        if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
            !device_create_file(dev, &dev_attr_coredump))
                return 0;

        sysfs_remove_link(&dev->kobj, "driver");

rm_dev:
        sysfs_remove_link(&dev->driver->p->kobj,
                          kobject_name(&dev->kobj));

fail:
        return ret;
}

static void driver_sysfs_remove(struct device *dev)
{
        struct device_driver *drv = dev->driver;

        if (drv) {
                if (drv->coredump)
                        device_remove_file(dev, &dev_attr_coredump);
                sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
                sysfs_remove_link(&dev->kobj, "driver");
        }
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
        int ret;

        ret = driver_sysfs_add(dev);
        if (!ret)
                driver_bound(dev);
        else if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
        return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
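
/*
 * Illustrative sketch (not from the original file): manual binding as the
 * kernel-doc above describes.  The caller pairs driver and device by hand
 * and then asks the core to complete the bookkeeping; drv and dev are
 * hypothetical and error handling is reduced to the minimum.
 *
 *      device_lock(dev);
 *      dev->driver = drv;
 *      ret = device_bind_driver(dev);
 *      if (ret)
 *              dev->driver = NULL;
 *      device_unlock(dev);
 */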

static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

static void driver_deferred_probe_add_trigger(struct device *dev,
                                              int local_trigger_count)
{
        driver_deferred_probe_add(dev);
        /* Did a trigger occur while probing? Need to re-trigger if yes */
        if (local_trigger_count != atomic_read(&deferred_trigger_count))
                driver_deferred_probe_trigger();
}

static int really_probe(struct device *dev, struct device_driver *drv)
{
        int ret = -EPROBE_DEFER;
        int local_trigger_count = atomic_read(&deferred_trigger_count);
        bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
                           !drv->suppress_bind_attrs;

        if (defer_all_probes) {
                /*
                 * Value of defer_all_probes can be set only by
                 * device_block_probing() which, in turn, will call
                 * wait_for_device_probe() right after that to avoid any races.
                 */
                dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
                driver_deferred_probe_add(dev);
                return ret;
        }

        ret = device_links_check_suppliers(dev);
        if (ret == -EPROBE_DEFER)
                driver_deferred_probe_add_trigger(dev, local_trigger_count);
        if (ret)
                return ret;

        atomic_inc(&probe_count);
        pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
                 drv->bus->name, __func__, drv->name, dev_name(dev));
        WARN_ON(!list_empty(&dev->devres_head));

re_probe:
        dev->driver = drv;

        /* If using pinctrl, bind pins now before probing */
        ret = pinctrl_bind_pins(dev);
        if (ret)
                goto pinctrl_bind_failed;

        ret = dma_configure(dev);
        if (ret)
                goto dma_failed;

        if (driver_sysfs_add(dev)) {
                printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
                        __func__, dev_name(dev));
                goto probe_failed;
        }

        if (dev->pm_domain && dev->pm_domain->activate) {
                ret = dev->pm_domain->activate(dev);
                if (ret)
                        goto probe_failed;
        }

        /*
         * Ensure devices are listed in devices_kset in the correct order.
         * It's important to move the device to the end of devices_kset
         * before calling .probe, because probing can be recursive and the
         * parent device should always come first.
         */
        devices_kset_move_last(dev);

        if (dev->bus->probe) {
                ret = dev->bus->probe(dev);
                if (ret)
                        goto probe_failed;
        } else if (drv->probe) {
                ret = drv->probe(dev);
                if (ret)
                        goto probe_failed;
        }

        if (test_remove) {
                test_remove = false;

                if (dev->bus->remove)
                        dev->bus->remove(dev);
                else if (drv->remove)
                        drv->remove(dev);

                devres_release_all(dev);
                driver_sysfs_remove(dev);
                dev->driver = NULL;
                dev_set_drvdata(dev, NULL);
                if (dev->pm_domain && dev->pm_domain->dismiss)
                        dev->pm_domain->dismiss(dev);
                pm_runtime_reinit(dev);

                goto re_probe;
        }

        pinctrl_init_done(dev);

        if (dev->pm_domain && dev->pm_domain->sync)
                dev->pm_domain->sync(dev);

        driver_bound(dev);
        ret = 1;
        pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
                 drv->bus->name, __func__, dev_name(dev), drv->name);
        goto done;

probe_failed:
        dma_deconfigure(dev);
dma_failed:
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
        device_links_no_driver(dev);
        devres_release_all(dev);
        driver_sysfs_remove(dev);
        dev->driver = NULL;
        dev_set_drvdata(dev, NULL);
        if (dev->pm_domain && dev->pm_domain->dismiss)
                dev->pm_domain->dismiss(dev);
        pm_runtime_reinit(dev);
        dev_pm_set_driver_flags(dev, 0);

        switch (ret) {
        case -EPROBE_DEFER:
                /* Driver requested deferred probing */
                dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
                driver_deferred_probe_add_trigger(dev, local_trigger_count);
                break;
        case -ENODEV:
        case -ENXIO:
                pr_debug("%s: probe of %s rejects match %d\n",
                         drv->name, dev_name(dev), ret);
                break;
        default:
                /* driver matched but the probe failed */
                printk(KERN_WARNING
                       "%s: probe of %s failed with error %d\n",
                       drv->name, dev_name(dev), ret);
        }
        /*
         * Ignore errors returned by ->probe so that the next driver can try
         * its luck.
         */
        ret = 0;
done:
        atomic_dec(&probe_count);
        wake_up(&probe_waitqueue);
        return ret;
}
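
/*
 * Illustrative sketch (not from the original file): because really_probe()
 * calls devres_release_all() on every failure path above, a probe routine
 * that sticks to devm_* allocations does not need its own unwind code.  The
 * "foo" structure and names are hypothetical.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct foo *priv;
 *
 *              priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return -ENOMEM;
 *
 *              platform_set_drvdata(pdev, priv);
 *              return 0;
 *      }
 */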

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
        pr_debug("%s: probe_count = %d\n", __func__,
                 atomic_read(&probe_count));
        if (atomic_read(&probe_count))
                return -EBUSY;
        return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
        /* wait for the deferred probe workqueue to finish */
        flush_work(&deferred_probe_work);

        /* wait for the known devices to complete their probing */
        wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
        async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
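
/*
 * Illustrative sketch (not from the original file): late boot code that must
 * not race with in-flight probes can call the helper above before walking the
 * set of bound devices.  The surrounding function is hypothetical.
 *
 *      static int __init foo_late_setup(void)
 *      {
 *              wait_for_device_probe();
 *              return foo_scan_bound_devices();
 *      }
 *      late_initcall(foo_late_setup);
 */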

/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
        int ret = 0;

        if (!device_is_registered(dev))
                return -ENODEV;

        pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
                 drv->bus->name, __func__, dev_name(dev), drv->name);

        pm_runtime_get_suppliers(dev);
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);

        pm_runtime_barrier(dev);
        ret = really_probe(dev, drv);
        pm_request_idle(dev);

        if (dev->parent)
                pm_runtime_put(dev->parent);

        pm_runtime_put_suppliers(dev);
        return ret;
}

bool driver_allows_async_probing(struct device_driver *drv)
{
        switch (drv->probe_type) {
        case PROBE_PREFER_ASYNCHRONOUS:
                return true;

        case PROBE_FORCE_SYNCHRONOUS:
                return false;

        default:
                if (module_requested_async_probing(drv->owner))
                        return true;

                return false;
        }
}
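
/*
 * Illustrative sketch (not from the original file): a driver requests the
 * asynchronous path checked above by setting probe_type in its
 * struct device_driver.  The "foo" driver is hypothetical.
 *
 *      static struct platform_driver foo_driver = {
 *              .probe  = foo_probe,
 *              .driver = {
 *                      .name       = "foo",
 *                      .probe_type = PROBE_PREFER_ASYNCHRONOUS,
 *              },
 *      };
 */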

struct device_attach_data {
        struct device *dev;

        /*
         * Indicates whether we are considering asynchronous probing or
         * not. Only initial binding after device or driver registration
         * (including deferral processing) may be done asynchronously; the
         * rest is always synchronous, as we expect it is being done by
         * request from userspace.
         */
        bool check_async;

        /*
         * Indicates if we are binding synchronous or asynchronous drivers.
         * When asynchronous probing is enabled we'll execute 2 passes
         * over drivers: first pass doing synchronous probing and second
         * doing asynchronous probing (if synchronous did not succeed -
         * most likely because there was no driver requiring synchronous
         * probing - and we found an asynchronous driver during the first
         * pass).  The 2 passes are done because we can't start an
         * asynchronous probe for a given device and driver from
         * bus_for_each_drv() since the driver pointer is not guaranteed to
         * stay valid once bus_for_each_drv() iterates to the next driver
         * on the bus.
         */
        bool want_async;

        /*
         * We'll set have_async to 'true' if, while scanning for a matching
         * driver, we encounter one that requests asynchronous probing.
         */
        bool have_async;
};

static int __device_attach_driver(struct device_driver *drv, void *_data)
{
        struct device_attach_data *data = _data;
        struct device *dev = data->dev;
        bool async_allowed;
        int ret;

        /*
         * Check if device has already been claimed. This may happen when
         * driver loading, device discovery/registration, and deferred probe
         * processing all happen at once with multiple threads.
         */
        if (dev->driver)
                return -EBUSY;

        ret = driver_match_device(drv, dev);
        if (ret == 0) {
                /* no match */
                return 0;
        } else if (ret == -EPROBE_DEFER) {
                dev_dbg(dev, "Device match requests probe deferral\n");
                driver_deferred_probe_add(dev);
        } else if (ret < 0) {
                dev_dbg(dev, "Bus failed to match device: %d\n", ret);
                return ret;
        } /* ret > 0 means positive match */

        async_allowed = driver_allows_async_probing(drv);

        if (async_allowed)
                data->have_async = true;

        if (data->check_async && async_allowed != data->want_async)
                return 0;

        return driver_probe_device(drv, dev);
}

static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
        struct device *dev = _dev;
        struct device_attach_data data = {
                .dev            = dev,
                .check_async    = true,
                .want_async     = true,
        };

        device_lock(dev);

        if (dev->parent)
                pm_runtime_get_sync(dev->parent);

        bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
        dev_dbg(dev, "async probe completed\n");

        pm_request_idle(dev);

        if (dev->parent)
                pm_runtime_put(dev->parent);

        device_unlock(dev);

        put_device(dev);
}

static int __device_attach(struct device *dev, bool allow_async)
{
        int ret = 0;

        device_lock(dev);
        if (dev->driver) {
                if (device_is_bound(dev)) {
                        ret = 1;
                        goto out_unlock;
                }
                ret = device_bind_driver(dev);
                if (ret == 0)
                        ret = 1;
                else {
                        dev->driver = NULL;
                        ret = 0;
                }
        } else {
                struct device_attach_data data = {
                        .dev = dev,
                        .check_async = allow_async,
                        .want_async = false,
                };

                if (dev->parent)
                        pm_runtime_get_sync(dev->parent);

                ret = bus_for_each_drv(dev->bus, NULL, &data,
                                        __device_attach_driver);
                if (!ret && allow_async && data.have_async) {
                        /*
                         * If we could not find an appropriate driver
                         * synchronously and we are allowed to do
                         * async probes and there are drivers that
                         * want to probe asynchronously, we'll
                         * try them.
                         */
                        dev_dbg(dev, "scheduling asynchronous probe\n");
                        get_device(dev);
                        async_schedule(__device_attach_async_helper, dev);
                } else {
                        pm_request_idle(dev);
                }

                if (dev->parent)
                        pm_runtime_put(dev->parent);
        }
out_unlock:
        device_unlock(dev);
        return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
        return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);

void device_initial_probe(struct device *dev)
{
        __device_attach(dev, true);
}

static int __driver_attach(struct device *dev, void *data)
{
        struct device_driver *drv = data;
        int ret;

        /*
         * Lock device and try to bind to it. We drop the error
         * here and always return 0, because we need to keep trying
         * to bind to devices and some drivers will return an error
         * simply because they don't support the device.
         *
         * driver_probe_device() will spit a warning if there
         * is an error.
         */

        ret = driver_match_device(drv, dev);
        if (ret == 0) {
                /* no match */
                return 0;
        } else if (ret == -EPROBE_DEFER) {
                dev_dbg(dev, "Device match requests probe deferral\n");
                driver_deferred_probe_add(dev);
        } else if (ret < 0) {
                dev_dbg(dev, "Bus failed to match device: %d\n", ret);
                return ret;
        } /* ret > 0 means positive match */

        if (dev->parent && dev->bus->need_parent_lock)
                device_lock(dev->parent);
        device_lock(dev);
        if (!dev->driver)
                driver_probe_device(drv, dev);
        device_unlock(dev);
        if (dev->parent && dev->bus->need_parent_lock)
                device_unlock(dev->parent);

        return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
        return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev, struct device *parent)
{
        struct device_driver *drv;

        drv = dev->driver;
        if (drv) {
                if (driver_allows_async_probing(drv))
                        async_synchronize_full();

                while (device_links_busy(dev)) {
                        device_unlock(dev);
                        if (parent)
                                device_unlock(parent);

                        device_links_unbind_consumers(dev);
                        if (parent)
                                device_lock(parent);

                        device_lock(dev);
                        /*
                         * A concurrent invocation of the same function might
                         * have released the driver successfully while this one
                         * was waiting, so check for that.
                         */
                        if (dev->driver != drv)
                                return;
                }

                pm_runtime_get_sync(dev);
                pm_runtime_clean_up_links(dev);

                driver_sysfs_remove(dev);

                if (dev->bus)
                        blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                                     BUS_NOTIFY_UNBIND_DRIVER,
                                                     dev);

                pm_runtime_put_sync(dev);

                if (dev->bus && dev->bus->remove)
                        dev->bus->remove(dev);
                else if (drv->remove)
                        drv->remove(dev);

                device_links_driver_cleanup(dev);
                dma_deconfigure(dev);

                devres_release_all(dev);
                dev->driver = NULL;
                dev_set_drvdata(dev, NULL);
                if (dev->pm_domain && dev->pm_domain->dismiss)
                        dev->pm_domain->dismiss(dev);
                pm_runtime_reinit(dev);
                dev_pm_set_driver_flags(dev, 0);

                klist_remove(&dev->p->knode_driver);
                device_pm_check_callbacks(dev);
                if (dev->bus)
                        blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                                     BUS_NOTIFY_UNBOUND_DRIVER,
                                                     dev);

                kobject_uevent(&dev->kobj, KOBJ_UNBIND);
        }
}

void device_release_driver_internal(struct device *dev,
                                    struct device_driver *drv,
                                    struct device *parent)
{
        if (parent && dev->bus->need_parent_lock)
                device_lock(parent);

        device_lock(dev);
        if (!drv || drv == dev->driver)
                __device_release_driver(dev, parent);

        device_unlock(dev);
        if (parent && dev->bus->need_parent_lock)
                device_unlock(parent);
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * If this function is to be called with @dev->parent lock held, ensure that
 * the device's consumers are unbound in advance or that their locks can be
 * acquired under the @dev->parent lock.
 */
void device_release_driver(struct device *dev)
{
        /*
         * If anyone calls device_release_driver() recursively from
         * within their ->remove callback for the same device, they
         * will deadlock right here.
         */
        device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
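
/*
 * Illustrative sketch (not from the original file): unbinding and rebinding a
 * device from user space, which typically ends up in device_release_driver()
 * and the attach paths above.  The bus, driver, and device names are made up
 * for the example.
 *
 *      echo foo.0 > /sys/bus/platform/drivers/foo/unbind
 *      echo foo.0 > /sys/bus/platform/drivers/foo/bind
 */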

/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
        struct device_private *dev_prv;
        struct device *dev;

        for (;;) {
                spin_lock(&drv->p->klist_devices.k_lock);
                if (list_empty(&drv->p->klist_devices.k_list)) {
                        spin_unlock(&drv->p->klist_devices.k_lock);
                        break;
                }
                dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
                                     struct device_private,
                                     knode_driver.n_node);
                dev = dev_prv->device;
                get_device(dev);
                spin_unlock(&drv->p->klist_devices.k_lock);
                device_release_driver_internal(dev, drv, dev->parent);
                put_device(dev);
        }
}