// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"

#include <linux/slab.h>
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}
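/*
 * Each hop table lives in two places: a physical copy in device memory that
 * the MMU hardware walks, allocated from the mmu_pgt_pool gen_pool, and a
 * shadow copy in host memory (kzalloc) that the driver reads and writes
 * cheaply instead of accessing device memory on every walk. alloc_hop()
 * below allocates both and returns the shadow address, which is used as the
 * hop's handle throughout this file.
 */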
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
			prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
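/*
 * PTE updates go through the ASIC write_pte hook, which typically ends up
 * as posted writes over PCI. Reading a PTE back from the device, together
 * with a host memory barrier, forces those posted writes to complete before
 * the MMU is expected to observe them.
 */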
static void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}
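/*
 * Example of the shadow-to-physical translation done by write_pte() below,
 * with made-up addresses: if val is 0xffff888012345003, i.e. the shadow
 * address of the next hop with flags 0x3 in the 12 LSBs, the hardware PTE
 * gets the physical address of that hop with the same 0x3 flags, while the
 * shadow PTE keeps the shadow address so later software walks stay in host
 * memory.
 */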
/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}

static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the hop was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}
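/*
 * get_hopN_pte_addr() below indexes into a hop table: it extracts the
 * per-hop field of the virtual address with (virt_addr & mask) >> shift and
 * scales it by the PTE size. Illustrative arithmetic with assumed values:
 * for a hop covering VA bits 20:12 (mask 0x1ff000, shift 12) and 8-byte
 * PTEs, virt_addr 0x123456 yields index (0x123456 & 0x1ff000) >> 12 = 0x123,
 * so the PTE sits at hop_addr + 0x123 * 8.
 */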
static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}
static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
					mmu_prop->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
					mmu_prop->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
					mmu_prop->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
					mmu_prop->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
					mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}

static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}
/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	/*
	 * hop0 tables are not in the shadow hash - they are allocated as one
	 * contiguous array per ASID - so they get special treatment
	 */
	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}
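/*
 * DRAM default page mapping: when enabled, the entire DRAM virtual range of
 * a user context starts out mapped, with every huge-page PTE pointing at a
 * single shared default physical page, so stray accesses land on a known
 * page rather than faulting the device. The init/fini pair below builds and
 * tears down the hop1/hop2/hop3 tables that back this mapping.
 */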
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;
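	/*
	 * Each hop3 table holds PTE_ENTRIES_IN_HOP entries, each mapping one
	 * DRAM page. Illustrative arithmetic with assumed values: 16 GB of
	 * DRAM, 2 MB pages and 512 entries per hop give
	 * 16 GB / 2 MB / 512 = 16 hop3 tables.
	 */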
	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;
	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}
	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}
	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;
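	/*
	 * Fill every hop3 PTE so the whole DRAM VA range resolves to the
	 * single shared default page. LAST_MASK marks these PTEs as the end
	 * of the translation, so the walk never descends to hop4 for DRAM.
	 */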
	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;
hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}
	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}
/**
 * hl_mmu_v1_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgts.
 *
 * Return: 0 for success, non-zero for failure.
 */
static int hl_mmu_v1_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;
	hdev->mmu_priv.dr.mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}
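	/*
	 * The first part of the pgt region holds the per-ASID hop0 tables,
	 * which are allocated statically (one per context), so only the
	 * remainder of the region is handed to the gen_pool for dynamic hop
	 * allocations.
	 */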
	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}
	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
						prop->mmu_hop_table_size,
						GFP_KERNEL | __GFP_ZERO);
	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	return rc;
}
/**
 * hl_mmu_v1_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
	/* MMU H/W fini was already done in device hw_fini() */

	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
	}

	/* Make sure that if we arrive here again without init being called
	 * we won't cause a kernel panic. This can happen, for example, if
	 * we fail during hard reset code at certain points.
	 */
	hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a hash to hold all page table hops related to this context,
 * and create the context's DRAM default page mapping.
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
	hash_init(ctx->mmu_shadow_hash);
	return dram_default_mapping_init(ctx);
}
/*
 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free DRAM default page mapping hops
 */
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}
}
static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
				u64 virt_addr, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte;
	bool is_huge, clear_hop3 = true;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
	hop1_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	is_huge = curr_pte & LAST_MASK;
	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev,
			"DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop4_addr = get_next_hop_addr(ctx, curr_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);

		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

		clear_hop3 = false;
	}
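	/*
	 * With DRAM default page mapping enabled, an unmapped DRAM VA still
	 * points at the shared default page, so "unmapping" restores the
	 * default PTE rather than clearing it, and the hop tables stay alive
	 * through the references taken at init time.
	 */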
	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | LAST_MASK |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		write_final_pte(ctx, hop3_pte_addr, default_pte);
		put_pte(ctx, hop3_addr);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop4_addr)
			clear_pte(ctx, hop4_pte_addr);
		else
			clear_pte(ctx, hop3_pte_addr);

		if (hop4_addr && !put_pte(ctx, hop4_addr))
			clear_hop3 = true;

		if (clear_hop3)
			clear_pte(ctx, hop3_pte_addr);

		if (put_pte(ctx, hop3_addr))
			goto mapped;

		clear_pte(ctx, hop2_pte_addr);

		if (put_pte(ctx, hop2_addr))
			goto mapped;

		clear_pte(ctx, hop1_pte_addr);

		if (put_pte(ctx, hop1_addr))
			goto mapped;

		clear_pte(ctx, hop0_pte_addr);
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}
static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool hop1_new = false, hop2_new = false, hop3_new = false,
			hop4_new = false, is_huge;
	int rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For a huge
	 * page there are only 3 hops rather than 4. Currently the DRAM
	 * allocation uses huge pages only but user memory could have been
	 * allocated with one of the two page sizes. Since this is common
	 * code for all three cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}
	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
	if (hop1_addr == ULLONG_MAX)
		goto err;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
	if (hop2_addr == ULLONG_MAX)
		goto err;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
	if (hop3_addr == ULLONG_MAX)
		goto err;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	if (!is_huge) {
		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
		if (hop4_addr == ULLONG_MAX)
			goto err;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
	}
	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | LAST_MASK |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		if (hop1_new || hop2_new || hop3_new || hop4_new) {
			dev_err(hdev->dev,
				"DRAM mapping should not allocate more hops\n");
			rc = -EFAULT;
			goto err;
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

		if (!is_huge)
			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
				*(u64 *) (uintptr_t) hop4_pte_addr,
				hop4_pte_addr);

		rc = -EINVAL;
		goto err;
	}
	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
			| PAGE_PRESENT_MASK;

	if (is_huge)
		write_final_pte(ctx, hop3_pte_addr, curr_pte);
	else
		write_final_pte(ctx, hop4_pte_addr, curr_pte);
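	/*
	 * Link each newly allocated hop into its parent table and take a
	 * reference (get_pte) on the parent for the new child, so put_pte
	 * during unmap can free intermediate hops once their last PTE goes
	 * away.
	 */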
	if (hop1_new) {
		curr_pte =
			(hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop0_pte_addr, curr_pte);
	}
	if (hop2_new) {
		curr_pte =
			(hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop1_pte_addr, curr_pte);
		get_pte(ctx, hop1_addr);
	}
	if (hop3_new) {
		curr_pte =
			(hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, curr_pte);
		get_pte(ctx, hop2_addr);
	}

	if (!is_huge) {
		if (hop4_new) {
			curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
					PAGE_PRESENT_MASK;
			write_pte(ctx, hop3_pte_addr, curr_pte);
			get_pte(ctx, hop3_addr);
		}

		get_pte(ctx, hop4_addr);
	} else {
		get_pte(ctx, hop3_addr);
	}

	return 0;
err:
	if (hop4_new)
		free_hop(ctx, hop4_addr);
	if (hop3_new)
		free_hop(ctx, hop3_addr);
	if (hop2_new)
		free_hop(ctx, hop2_addr);
	if (hop1_new)
		free_hop(ctx, hop1_addr);

	return rc;
}
/*
 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{

}
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					int hop_num, u64 hop_addr, u64 virt_addr)
{
	switch (hop_num) {
	case 0:
		return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 1:
		return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 2:
		return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 3:
		return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 4:
		return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	default:
		break;
	}
	return U64_MAX;
}
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
				struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
	int i, used_hops;
	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);
	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
						prop->pmmu.start_addr,
						prop->pmmu.end_addr);
	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
						prop->pmmu_huge.page_size,
						prop->pmmu_huge.start_addr,
						prop->pmmu_huge.end_addr);

	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (is_pmmu_addr) {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	} else if (is_pmmu_h_addr) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		return -EINVAL;
	}
	used_hops = mmu_prop->num_hops;

	/* huge pages use one less hop */
	if (is_huge)
		used_hops--;
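	/*
	 * Walk from hop0 down, recording each hop's table address, PTE
	 * address and PTE value. The walk stops at the first PTE with
	 * LAST_MASK set; a non-present PTE on the way means the address is
	 * simply not mapped.
	 */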
	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
	hops->hop_info[0].hop_pte_addr =
			get_hop_pte_addr(ctx, mmu_prop, 0,
					hops->hop_info[0].hop_addr, virt_addr);
	hops->hop_info[0].hop_pte_val =
			hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[0].hop_pte_addr);
	for (i = 1 ; i < used_hops ; i++) {
		hops->hop_info[i].hop_addr =
				get_next_hop_addr(ctx,
					hops->hop_info[i - 1].hop_pte_val);
		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
			return -EFAULT;

		hops->hop_info[i].hop_pte_addr =
				get_hop_pte_addr(ctx, mmu_prop, i,
						hops->hop_info[i].hop_addr,
						virt_addr);
		hops->hop_info[i].hop_pte_val =
				hdev->asic_funcs->read_pte(hdev,
					hops->hop_info[i].hop_pte_addr);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & LAST_MASK)
			break;
	}
	/* if we passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
		return -EFAULT;

	hops->used_hops = i + 1;

	return 0;
}
/*
 * hl_mmu_v1_set_funcs - set the MMU v1 function pointers
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
 */
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
	mmu->init = hl_mmu_v1_init;
	mmu->fini = hl_mmu_v1_fini;
	mmu->ctx_init = hl_mmu_v1_ctx_init;
	mmu->ctx_fini = hl_mmu_v1_ctx_fini;
	mmu->map = _hl_mmu_v1_map;
	mmu->unmap = _hl_mmu_v1_unmap;
	mmu->flush = flush;
	mmu->swap_out = hl_mmu_v1_swap_out;
	mmu->swap_in = hl_mmu_v1_swap_in;
	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}