/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
/*
 * For coherent userptr handling, the driver registers an MMU notifier to be
 * informed about updates to the page tables of a process.
 *
 * When somebody tries to invalidate the page tables, we block the update until
 * all operations on the pages in question are completed. Then those pages are
 * marked as accessed and also as dirty if it wasn't a read-only access.
 *
 * New command submissions using the userptrs in question are delayed until all
 * page table invalidations are completed and we once more see a coherent
 * process address space.
 */
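/*
 * Rough life cycle of a userptr BO under this scheme (illustrative sketch,
 * not driver code; error handling omitted, "userptr_addr" names the CPU
 * address the BO wraps):
 *
 *	amdgpu_mn_register(bo, userptr_addr);	// start monitoring
 *
 *	// On CPU page table invalidation HMM calls back into
 *	// amdgpu_mn_sync_pagetables_gfx()/_hsa() below, which wait for or
 *	// evict GPU work using the affected pages before the mapping changes.
 *
 *	amdgpu_mn_unregister(bo);		// stop monitoring on teardown
 */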
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/hmm.h>
#include <linux/interval_tree.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
/**
 * struct amdgpu_mn_node
 *
 * @it: interval node defining start-last of the affected address range
 * @bos: list of all BOs in the affected address range
 *
 * Manages all BOs which are affected by a certain range of address space.
 */
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
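/*
 * Nodes live in the rbtree-backed interval tree amn->objects, keyed by the
 * [start, last] range they cover. A range query therefore looks roughly like
 * the sketch below (the same pattern the sync_pagetables callbacks use further
 * down; "amn", "start" and "end" stand for the notifier context and the
 * queried range):
 *
 *	struct interval_tree_node *it;
 *	struct amdgpu_bo *bo;
 *
 *	it = interval_tree_iter_first(&amn->objects, start, end);
 *	while (it) {
 *		struct amdgpu_mn_node *node =
 *			container_of(it, struct amdgpu_mn_node, it);
 *
 *		it = interval_tree_iter_next(it, start, end);
 *		list_for_each_entry(bo, &node->bos, mn_list)
 *			;	// each bo overlaps [start, end]
 *	}
 */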
/**
 * amdgpu_mn_destroy - destroy the HMM mirror
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = amn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&amn->lock);
	hash_del(&amn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &amn->objects.rb_root, it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);

	hmm_mirror_unregister(&amn->mirror);
	kfree(amn);
}
/**
 * amdgpu_hmm_mirror_release - callback to notify about mm destruction
 *
 * @mirror: the HMM mirror (mm) this callback is about
 *
 * Schedule a work item to lazily destroy the HMM mirror.
 */
static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);

	INIT_WORK(&amn->work, amdgpu_mn_destroy);
	schedule_work(&amn->work);
}
/**
 * amdgpu_mn_lock - take the write side lock for this notifier
 * @mn: our notifier
 */
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
	if (mn)
		down_write(&mn->lock);
}

/**
 * amdgpu_mn_unlock - drop the write side lock for this notifier
 * @mn: our notifier
 */
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
	if (mn)
		up_write(&mn->lock);
}
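/*
 * Illustrative sketch (not part of this file): the command submission path is
 * the intended user of the write side lock. It brackets the final userptr
 * check and job commit so that no invalidation can slip in between. Roughly,
 * with "mn" a notifier context previously obtained via amdgpu_mn_get():
 *
 *	amdgpu_mn_lock(mn);
 *	// re-validate the userptr pages, then commit the job
 *	amdgpu_mn_unlock(mn);
 */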
/**
 * amdgpu_mn_read_lock - take the read side lock for this notifier
 * @amn: our notifier
 * @blockable: is the notifier blockable
 */
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
	if (blockable)
		down_read(&amn->lock);
	else if (!down_read_trylock(&amn->lock))
		return -EAGAIN;

	return 0;
}

/**
 * amdgpu_mn_read_unlock - drop the read side lock for this notifier
 * @amn: our notifier
 */
static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
	up_read(&amn->lock);
}
/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of address range affected
 * @end: end of address range affected
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {
		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	}
}
/**
 * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to update
 * @update: the update start, end address
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static int
amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
			      const struct mmu_notifier_range *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = mmu_notifier_range_blockable(update);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	/* TODO we should be able to split locking for interval tree and
	 * amdgpu_mn_invalidate_node
	 */
	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}
/**
 * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to update
 * @update: the update start, end address
 *
 * We temporarily evict all BOs between start and end. This
 * necessitates evicting all user-mode queues of the process. The BOs
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
 */
static int
amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
			      const struct mmu_notifier_range *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = mmu_notifier_range_blockable(update);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {
			struct kgd_mem *mem = bo->kfd_bo;

			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
							 start, end))
				amdgpu_amdkfd_evict_userptr(mem, amn->mm);
		}
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}
/* Low bits of any reasonable mm pointer will be unused due to struct
 * alignment. Use these bits to make a unique key from the mm pointer
 * and notifier type.
 */
#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
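/*
 * Worked example (illustrative, the address is made up): a struct mm_struct
 * pointer such as 0xffff888812345600 has its low bits clear because the
 * structure is at least pointer-aligned. Assuming the enum values
 * AMDGPU_MN_TYPE_GFX == 0 and AMDGPU_MN_TYPE_HSA == 1 from amdgpu_mn.h, the
 * GFX and HSA contexts of the same process then hash under the two distinct
 * keys 0xffff888812345600 and 0xffff888812345601.
 */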
static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
	[AMDGPU_MN_TYPE_GFX] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
		.release = amdgpu_hmm_mirror_release
	},
	[AMDGPU_MN_TYPE_HSA] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
		.release = amdgpu_hmm_mirror_release
	},
};
/**
 * amdgpu_mn_get - create HMM mirror context
 *
 * @adev: amdgpu device pointer
 * @type: type of MMU notifier context
 *
 * Creates an HMM mirror context for current->mm.
 */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
				enum amdgpu_mn_type type)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *amn;
	unsigned long key = AMDGPU_MN_KEY(mm, type);
	int r;

	mutex_lock(&adev->mn_lock);
	if (down_write_killable(&mm->mmap_sem)) {
		mutex_unlock(&adev->mn_lock);
		return ERR_PTR(-EINTR);
	}

	hash_for_each_possible(adev->mn_hash, amn, node, key)
		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
			goto release_locks;

	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
	if (!amn) {
		amn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	amn->adev = adev;
	amn->mm = mm;
	init_rwsem(&amn->lock);
	amn->type = type;
	amn->objects = RB_ROOT_CACHED;

	amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
	r = hmm_mirror_register(&amn->mirror, mm);
	if (r)
		goto free_amn;

	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return amn;

free_amn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(amn);

	return ERR_PTR(r);
}
/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an HMM mirror for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	enum amdgpu_mn_type type =
		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
	struct amdgpu_mn *amn;
	struct amdgpu_mn_node *node = NULL, *new_node;
	struct list_head bos;
	struct interval_tree_node *it;

	amn = amdgpu_mn_get(adev, type);
	if (IS_ERR(amn))
		return PTR_ERR(amn);

	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&bos);

	down_write(&amn->lock);

	/* merge all overlapping nodes into one covering [addr, end] */
	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &amn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node)
		node = new_node;
	else
		kfree(new_node);

	bo->mn = amn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &amn->objects);

	up_write(&amn->lock);

	return 0;
}
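/*
 * Illustrative usage (not part of this file): the GEM userptr path is expected
 * to pair these calls when it creates and destroys a userptr BO. Roughly, with
 * "userptr_addr" standing for the user supplied CPU address:
 *
 *	r = amdgpu_mn_register(bo, userptr_addr);
 *	if (r)
 *		goto error_free;
 *	...
 *	amdgpu_mn_unregister(bo);	// on free / teardown
 */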
/**
 * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of HMM mirror updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *amn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	amn = bo->mn;
	if (amn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&amn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

	/* if the node is now empty, remove and free it as well */
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;

		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &amn->objects);
		kfree(node);
	}

	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);
}
/* Flags used internally by HMM, not related to CPU/GPU PTE flags */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
	0 /* HMM_PFN_DEVICE_PRIVATE */
};

static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
	0, /* HMM_PFN_NONE */
	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
};

void amdgpu_hmm_init_range(struct hmm_range *range)
{
	if (range) {
		range->flags = hmm_range_flags;
		range->values = hmm_range_values;
		range->pfn_shift = PAGE_SHIFT;
	}
}
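/*
 * Illustrative sketch (not part of this file) of how a caller might consume a
 * range initialized here, assuming the hmm_range_fault()-style flow of this
 * kernel generation; "pfns", "start", "end" and "npages" are placeholders
 * supplied by the caller:
 *
 *	struct hmm_range range = { .pfns = pfns, .start = start, .end = end };
 *
 *	amdgpu_hmm_init_range(&range);
 *	// After a successful fault, entry i is usable when
 *	// (range.pfns[i] & range.flags[HMM_PFN_VALID]) is set, and writable
 *	// when the HMM_PFN_WRITE bit from the table above is also set.
 */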