// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
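
/*
 * INTERVAL_TREE_DEFINE() expands to the static helpers used throughout
 * this file: __mmu_int_rb_insert(), __mmu_int_rb_remove(),
 * __mmu_int_rb_iter_first() and __mmu_int_rb_iter_next(), which manage
 * an augmented rbtree of [start, last] address intervals keyed by the
 * two accessors below.
 */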
/* Intervals are tracked at page granularity: round the start down ... */
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

/* ... and report the last byte of the last page the buffer touches. */
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}
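
/*
 * Illustrative usage (the my_* names are hypothetical, not part of this
 * file): a client embeds struct mmu_rb_node in its own cache entry,
 * fills in struct mmu_rb_ops and registers one handler per mm:
 *
 *	static struct mmu_rb_ops my_ops = {
 *		.filter = my_filter,	// optional, may be NULL
 *		.insert = my_insert,
 *		.evict = my_evict,
 *		.remove = my_remove,
 *		.invalidate = my_invalidate,
 *	};
 *
 *	ret = hfi1_mmu_rb_register(my_cache, &my_ops, my_wq, &my_handler);
 *
 * Every callback gets ops_arg (my_cache here) passed back to it.
 */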
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running. It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}
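
/*
 * Note that handler->lock is held (with interrupts disabled) across the
 * ops->insert() callback above, so that callback must not sleep.
 * Overlapping inserts are rejected with -EINVAL, and -EPERM is returned
 * when the caller's mm is not the one the notifier was registered on.
 */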
/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}
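
/*
 * Without a filter callback, the search above returns the first node
 * whose page-aligned interval overlaps [addr, addr + len - 1].  With a
 * filter, every overlapping node is offered to it and the first node
 * the filter accepts is returned (NULL if none is accepted).
 */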
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	if (current->mm != handler->mn.mm)
		return ret;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}
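
/*
 * Return semantics: *rb_node is set to the matching node (or NULL) in
 * all cases.  The function returns true only when a non-exact match was
 * found and removed; an exact match is left in the tree and on the LRU
 * list so the caller can reuse it.
 */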
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}
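
/*
 * Eviction walks lru_list in reverse, so entries near the tail (the
 * oldest, absent any reordering by the client) are offered to
 * ops->evict() first; the callback sets *stop once enough has been
 * freed.  Nodes are unlinked under the handler lock, but the
 * potentially sleeping ops->remove() calls run after it is dropped.
 */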
/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	if (current->mm != handler->mn.mm)
		return;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}
static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start,
					    range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}
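
/*
 * Removal is deferred to the workqueue here because this notifier can
 * run with mm->mmap_lock held and in a context where the client's
 * remove callback must not sleep; handle_remove() invokes the callbacks
 * later without those constraints.
 */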
/*
 * Call the remove function for the given handler and the list. This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}
/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed. The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}