// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/sizes.h>
#include <linux/vfio_pci_core.h>

/*
 * The device memory usable to the workloads running in the VM is cached
 * and showcased as a 64b device BAR (comprising the BAR4 and BAR5 regions)
 * to the VM and is represented as usemem.
 * Moreover, the VM GPU device driver needs a non-cacheable region to
 * support the MIG feature. This region is also exposed as a 64b BAR
 * (comprising the BAR2 and BAR3 regions) and represented as resmem.
 */
#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX

/* Memory size expected as non cached and reserved by the VM driver */
#define RESMEM_SIZE SZ_1G

/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
#define MEMBLK_SIZE SZ_512M
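
/*
 * Illustrative arithmetic (hypothetical total, not read from hardware):
 * carving RESMEM_SIZE out of a 96 GB device leaves 95 GB, which is
 * already MEMBLK_SIZE (512M) aligned and so becomes usemem; the 1 GB
 * remainder at the end of device memory becomes resmem. See
 * nvgrace_gpu_init_nvdev_struct() for the actual calculation.
 */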

/*
 * The state of the two device memory regions - resmem and usemem - is
 * saved as struct mem_region.
 */
struct mem_region {
        phys_addr_t memphys;    /* Base physical address of the region */
        size_t memlength;       /* Region size */
        size_t bar_size;        /* Reported region BAR size */
        __le64 bar_val;         /* Emulated BAR offset registers */
        union {
                void *memaddr;
                void __iomem *ioaddr;
        };                      /* Base virtual address of the region */
};

struct nvgrace_gpu_pci_core_device {
        struct vfio_pci_core_device core_device;
        /* Cached and usable memory for the VM. */
        struct mem_region usemem;
        /* Non cached memory carved out from the end of device memory */
        struct mem_region resmem;
        /* Lock to control device memory kernel mapping */
        struct mutex remap_lock;
};

51 static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
53 struct nvgrace_gpu_pci_core_device *nvdev =
54 container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
57 nvdev->resmem.bar_val = 0;
58 nvdev->usemem.bar_val = 0;
/* Choose the structure corresponding to the fake BAR with a given index. */
static struct mem_region *
nvgrace_gpu_memregion(int index,
                      struct nvgrace_gpu_pci_core_device *nvdev)
{
        if (index == USEMEM_REGION_INDEX)
                return &nvdev->usemem;

        if (index == RESMEM_REGION_INDEX)
                return &nvdev->resmem;

        return NULL;
}

static int nvgrace_gpu_open_device(struct vfio_device *core_vdev)
{
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        int ret;

        ret = vfio_pci_core_enable(vdev);
        if (ret)
                return ret;

        if (nvdev->usemem.memlength) {
                nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
                mutex_init(&nvdev->remap_lock);
        }

        vfio_pci_core_finish_enable(vdev);

        return 0;
}

static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);

        /* Unmap the mapping to the device memory cached region */
        if (nvdev->usemem.memaddr) {
                memunmap(nvdev->usemem.memaddr);
                nvdev->usemem.memaddr = NULL;
        }

        /* Unmap the mapping to the device memory non-cached region */
        if (nvdev->resmem.ioaddr) {
                iounmap(nvdev->resmem.ioaddr);
                nvdev->resmem.ioaddr = NULL;
        }

        mutex_destroy(&nvdev->remap_lock);

        vfio_pci_core_close_device(core_vdev);
}

static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
                            struct vm_area_struct *vma)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        struct mem_region *memregion;
        unsigned long start_pfn;
        u64 req_len, pgoff, end;
        unsigned int index;
        int ret = 0;

        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

        memregion = nvgrace_gpu_memregion(index, nvdev);
        if (!memregion)
                return vfio_pci_core_mmap(core_vdev, vma);

        /*
         * Request to mmap the BAR. Map to the CPU accessible memory on the
         * GPU using the memory information gathered from the system ACPI
         * tables.
         */
        pgoff = vma->vm_pgoff &
                ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);

        if (check_sub_overflow(vma->vm_end, vma->vm_start, &req_len) ||
            check_add_overflow(PHYS_PFN(memregion->memphys), pgoff, &start_pfn) ||
            check_add_overflow(PFN_PHYS(pgoff), req_len, &end))
                return -EOVERFLOW;

        /*
         * Check that the mapping request does not go beyond available device
         * memory size.
         */
        if (end > memregion->memlength)
                return -EINVAL;

        /*
         * The carved out region of the device memory needs the NORMAL_NC
         * property. Communicate as such to the hypervisor.
         */
        if (index == RESMEM_REGION_INDEX) {
                /*
                 * The nvgrace-gpu module has no issues with uncontained
                 * failures on NORMAL_NC accesses. VM_ALLOW_ANY_UNCACHED is
                 * set to communicate to the KVM to S2 map as NORMAL_NC.
                 * This opens up guest usage of NORMAL_NC for this mapping.
                 */
                vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED);

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }

        /*
         * Perform a PFN map to the memory and back the device BAR by the
         * GPU memory.
         *
         * The available GPU memory size may not be power-of-2 aligned. The
         * remainder is only backed by vfio_device_ops read/write handlers.
         *
         * During device reset, the GPU is safely disconnected from the CPU
         * and access to the BAR will be immediately returned, preventing
         * machine check.
         */
        ret = remap_pfn_range(vma, vma->vm_start, start_pfn,
                              req_len, vma->vm_page_prot);
        if (ret)
                return ret;

        vma->vm_pgoff = start_pfn;

        return 0;
}
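
/*
 * Illustrative userspace usage (a sketch, not part of the driver): after
 * querying the region with VFIO_DEVICE_GET_REGION_INFO, the fake BAR can
 * be mapped through the returned offset, e.g.
 *
 *      void *p = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, device_fd, info.offset);
 *
 * device_fd, info and map_len are assumed from the caller's context;
 * map_len must not exceed the sparse mmap area (the memory actually
 * present on the hardware), or the driver returns -EINVAL.
 */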

static long
nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev,
                                  unsigned long arg)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        unsigned long minsz = offsetofend(struct vfio_region_info, offset);
        struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
        struct vfio_region_info_cap_sparse_mmap *sparse;
        struct vfio_region_info info;
        struct mem_region *memregion;
        u32 size;
        int ret;

        if (copy_from_user(&info, (void __user *)arg, minsz))
                return -EFAULT;

        if (info.argsz < minsz)
                return -EINVAL;

        /*
         * Request to determine the BAR region information. Send the
         * GPU memory information.
         */
        memregion = nvgrace_gpu_memregion(info.index, nvdev);
        if (!memregion)
                return vfio_pci_core_ioctl(core_vdev,
                                           VFIO_DEVICE_GET_REGION_INFO, arg);

        size = struct_size(sparse, areas, 1);

        /*
         * Set up the sparse mapping for the device memory. Only the
         * available device memory on the hardware is shown as a
         * mappable region.
         */
        sparse = kzalloc(size, GFP_KERNEL);
        if (!sparse)
                return -ENOMEM;

        sparse->nr_areas = 1;
        sparse->areas[0].offset = 0;
        sparse->areas[0].size = memregion->memlength;
        sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
        sparse->header.version = 1;

        ret = vfio_info_add_capability(&caps, &sparse->header, size);
        kfree(sparse);
        if (ret)
                return ret;

        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
        /*
         * The region memory size may not be power-of-2 aligned.
         * Given that the memory is exposed as a BAR and may not be
         * aligned, round up to the next power-of-2.
         */
        info.size = memregion->bar_size;
        info.flags = VFIO_REGION_INFO_FLAG_READ |
                     VFIO_REGION_INFO_FLAG_WRITE |
                     VFIO_REGION_INFO_FLAG_MMAP;

        if (caps.size) {
                info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
                if (info.argsz < sizeof(info) + caps.size) {
                        info.argsz = sizeof(info) + caps.size;
                        info.cap_offset = 0;
                } else {
                        vfio_info_cap_shift(&caps, sizeof(info));
                        if (copy_to_user((void __user *)arg +
                                         sizeof(info), caps.buf,
                                         caps.size)) {
                                kfree(caps.buf);
                                return -EFAULT;
                        }
                        info.cap_offset = sizeof(info);
                }
                kfree(caps.buf);
        }

        return copy_to_user((void __user *)arg, &info, minsz) ?
                       -EFAULT : 0;
}
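
/*
 * Illustrative userspace usage (a sketch, not part of the driver):
 *
 *      struct vfio_region_info info = {
 *              .argsz = sizeof(info),
 *              .index = USEMEM_REGION_INDEX,   // i.e. the BAR4 region index
 *      };
 *
 *      ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *
 * info.size then reports the power-of-2 rounded bar_size, while the
 * sparse mmap capability (fetched with a larger argsz) reports the
 * memory actually present. device_fd is an assumed open VFIO device fd.
 */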

static long nvgrace_gpu_ioctl(struct vfio_device *core_vdev,
                              unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case VFIO_DEVICE_GET_REGION_INFO:
                return nvgrace_gpu_ioctl_get_region_info(core_vdev, arg);
        case VFIO_DEVICE_IOEVENTFD:
                return -ENOTTY;
        case VFIO_DEVICE_RESET:
                nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
                fallthrough;
        default:
                return vfio_pci_core_ioctl(core_vdev, cmd, arg);
        }
}

static __le64
nvgrace_gpu_get_read_value(size_t bar_size, u64 flags, __le64 val64)
{
        u64 tmp_val;

        tmp_val = le64_to_cpu(val64);
        tmp_val &= ~(bar_size - 1);
        tmp_val |= flags;

        return cpu_to_le64(tmp_val);
}
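
/*
 * Masking with ~(bar_size - 1) emulates the standard PCI BAR sizing
 * protocol: the low address bits read back as zero. As an illustrative
 * walk-through (generic BAR probing, not extra driver logic), with a
 * 2 GB bar_size (0x80000000) a guest that writes all 1s reads back
 * 0xffffffff80000000 plus the flag bits and decodes the size as
 * ~0xffffffff80000000 + 1 = 0x80000000 = 2 GB.
 */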

/*
 * Both the usable (usemem) and the reserved (resmem) device memory regions
 * are exposed as 64b fake device BARs in the VM. These fake BARs must
 * respond to the accesses on their respective PCI config space offsets.
 *
 * resmem BAR owns PCI_BASE_ADDRESS_2 & PCI_BASE_ADDRESS_3.
 * usemem BAR owns PCI_BASE_ADDRESS_4 & PCI_BASE_ADDRESS_5.
 */
static ssize_t
nvgrace_gpu_read_config_emu(struct vfio_device *core_vdev,
                            char __user *buf, size_t count, loff_t *ppos)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
        struct mem_region *memregion = NULL;
        __le64 val64;
        size_t register_offset;
        loff_t copy_offset;
        size_t copy_count;
        int ret;

        ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
        if (ret < 0)
                return ret;

        if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
                                                sizeof(val64),
                                                &copy_offset, &copy_count,
                                                &register_offset))
                memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
        else if (vfio_pci_core_range_intersect_range(pos, count,
                                                     PCI_BASE_ADDRESS_4,
                                                     sizeof(val64),
                                                     &copy_offset, &copy_count,
                                                     &register_offset))
                memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);

        if (memregion) {
                val64 = nvgrace_gpu_get_read_value(memregion->bar_size,
                                                   PCI_BASE_ADDRESS_MEM_TYPE_64 |
                                                   PCI_BASE_ADDRESS_MEM_PREFETCH,
                                                   memregion->bar_val);
                if (copy_to_user(buf + copy_offset,
                                 (void *)&val64 + register_offset, copy_count)) {
                        /*
                         * The position has been incremented in
                         * vfio_pci_core_read. Reset the offset back to the
                         * starting position.
                         */
                        *ppos -= count;
                        return -EFAULT;
                }
        }

        return count;
}

static ssize_t
nvgrace_gpu_write_config_emu(struct vfio_device *core_vdev,
                             const char __user *buf, size_t count, loff_t *ppos)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
        struct mem_region *memregion = NULL;
        size_t register_offset;
        loff_t copy_offset;
        size_t copy_count;

        if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
                                                sizeof(u64), &copy_offset,
                                                &copy_count, &register_offset))
                memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
        else if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_4,
                                                     sizeof(u64), &copy_offset,
                                                     &copy_count, &register_offset))
                memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);

        if (memregion) {
                if (copy_from_user((void *)&memregion->bar_val + register_offset,
                                   buf + copy_offset, copy_count))
                        return -EFAULT;

                *ppos += copy_count;
                return copy_count;
        }

        return vfio_pci_core_write(core_vdev, buf, count, ppos);
}

/*
 * Ad hoc map the device memory in the module kernel VA space. Primarily needed
 * as vfio does not require the userspace driver to only perform accesses through
 * mmaps of the vfio-pci BAR regions; such accesses should also be supported
 * using the vfio_device_ops read/write implementations.
 *
 * The usemem region is cacheable memory and hence is memremapped.
 * The resmem region is non-cached and is mapped using ioremap_wc (NORMAL_NC).
 */
static int
nvgrace_gpu_map_device_mem(int index,
                           struct nvgrace_gpu_pci_core_device *nvdev)
{
        struct mem_region *memregion;
        int ret = 0;

        memregion = nvgrace_gpu_memregion(index, nvdev);
        if (!memregion)
                return -EINVAL;

        mutex_lock(&nvdev->remap_lock);

        if (memregion->memaddr)
                goto unlock;

        if (index == USEMEM_REGION_INDEX)
                memregion->memaddr = memremap(memregion->memphys,
                                              memregion->memlength,
                                              MEMREMAP_WB);
        else
                memregion->ioaddr = ioremap_wc(memregion->memphys,
                                               memregion->memlength);

        if (!memregion->memaddr)
                ret = -ENOMEM;

unlock:
        mutex_unlock(&nvdev->remap_lock);

        return ret;
}

/*
 * Read the data from the device memory (mapped either through ioremap
 * or memremap) into the user buffer.
 */
static int
nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev,
                         char __user *buf, size_t mem_count, loff_t *ppos)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
        int ret;

        if (!mem_count)
                return 0;

        /*
         * Handle read on the BAR regions. Map to the target device memory
         * physical address and copy to the request read buffer.
         */
        ret = nvgrace_gpu_map_device_mem(index, nvdev);
        if (ret)
                return ret;

        if (index == USEMEM_REGION_INDEX) {
                if (copy_to_user(buf,
                                 (u8 *)nvdev->usemem.memaddr + offset,
                                 mem_count))
                        ret = -EFAULT;
        } else {
                /*
                 * The hardware ensures that the system does not crash when
                 * the device memory is accessed with the memory enable
                 * turned off. It synthesizes ~0 on such read. So there is
                 * no need to check or support the disablement/enablement of
                 * BAR through PCI_COMMAND config space register. Pass the
                 * test_mem flag as false.
                 */
                ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
                                             nvdev->resmem.ioaddr,
                                             buf, offset, mem_count,
                                             0, 0, false);
        }

        return ret;
}

/*
 * Read count bytes from the device memory at an offset. The actual device
 * memory size (available) may not be a power-of-2. So the driver fakes
 * the size to a power-of-2 (reported) when exposing it to a user space driver.
 *
 * Reads starting beyond the reported size generate -EINVAL; reads extending
 * beyond the actual device size are filled with ~0; reads extending beyond
 * the reported size are truncated.
 */
static ssize_t
nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev,
                     char __user *buf, size_t count, loff_t *ppos)
{
        u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct mem_region *memregion;
        unsigned long i;
        u8 val = 0xFF;
        size_t mem_count;
        int ret = 0;

        /* No need for a NULL check; the caller already did one. */
        memregion = nvgrace_gpu_memregion(index, nvdev);

        if (offset >= memregion->bar_size)
                return -EINVAL;

        /* Clip short the read request beyond reported BAR size */
        count = min(count, memregion->bar_size - (size_t)offset);

        /*
         * Determine how many bytes are actually read from the device memory.
         * Read requests beyond the actual device memory size are filled with
         * ~0, while those beyond the reported size are skipped.
         */
        if (offset >= memregion->memlength)
                mem_count = 0;
        else
                mem_count = min(count, memregion->memlength - (size_t)offset);

        ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos);
        if (ret)
                return ret;

        /*
         * Only the device memory present on the hardware is mapped, which may
         * not be power-of-2 aligned. A read to an offset beyond the device
         * memory size is filled with ~0.
         */
        for (i = mem_count; i < count; i++) {
                ret = put_user(val, (unsigned char __user *)(buf + i));
                if (ret)
                        return ret;
        }

        *ppos += count;
        return count;
}

static ssize_t
nvgrace_gpu_read(struct vfio_device *core_vdev,
                 char __user *buf, size_t count, loff_t *ppos)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);

        if (nvgrace_gpu_memregion(index, nvdev))
                return nvgrace_gpu_read_mem(nvdev, buf, count, ppos);

        if (index == VFIO_PCI_CONFIG_REGION_INDEX)
                return nvgrace_gpu_read_config_emu(core_vdev, buf, count, ppos);

        return vfio_pci_core_read(core_vdev, buf, count, ppos);
}

/*
 * Write the data to the device memory (mapped either through ioremap
 * or memremap) from the user buffer.
 */
static int
nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev,
                          const char __user *buf, size_t mem_count,
                          loff_t *ppos)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
        int ret;

        if (!mem_count)
                return 0;

        ret = nvgrace_gpu_map_device_mem(index, nvdev);
        if (ret)
                return ret;

        if (index == USEMEM_REGION_INDEX) {
                if (copy_from_user((u8 *)nvdev->usemem.memaddr + pos,
                                   buf, mem_count))
                        return -EFAULT;
        } else {
                /*
                 * The hardware ensures that the system does not crash when
                 * the device memory is accessed with the memory enable
                 * turned off. It drops such writes. So there is no need to
                 * check or support the disablement/enablement of BAR
                 * through PCI_COMMAND config space register. Pass the
                 * test_mem flag as false.
                 */
                ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
                                             nvdev->resmem.ioaddr,
                                             (char __user *)buf, pos, mem_count,
                                             0, 0, true);
        }

        return ret;
}

/*
 * Write count bytes to the device memory at a given offset. The actual device
 * memory size (available) may not be a power-of-2. So the driver fakes the
 * size to a power-of-2 (reported) when exposing it to a user space driver.
 *
 * Writes extending beyond the reported size are truncated; writes starting
 * beyond the reported size generate -EINVAL.
 */
static ssize_t
nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev,
                      size_t count, loff_t *ppos, const char __user *buf)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
        struct mem_region *memregion;
        size_t mem_count;
        int ret = 0;

        /* No need for a NULL check; the caller already did one. */
        memregion = nvgrace_gpu_memregion(index, nvdev);

        if (offset >= memregion->bar_size)
                return -EINVAL;

        /* Clip short the write request beyond reported BAR size */
        count = min(count, memregion->bar_size - (size_t)offset);

        /*
         * Determine how many bytes are actually written to the device memory.
         * Do not write to an offset beyond the available size.
         */
        if (offset >= memregion->memlength)
                goto exitfn;

        /*
         * Only the device memory present on the hardware is mapped, which may
         * not be power-of-2 aligned. Drop access outside the available device
         * memory on the hardware.
         */
        mem_count = min(count, memregion->memlength - (size_t)offset);

        ret = nvgrace_gpu_map_and_write(nvdev, buf, mem_count, ppos);
        if (ret)
                return ret;

exitfn:
        *ppos += count;
        return count;
}

static ssize_t
nvgrace_gpu_write(struct vfio_device *core_vdev,
                  const char __user *buf, size_t count, loff_t *ppos)
{
        struct nvgrace_gpu_pci_core_device *nvdev =
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);

        if (nvgrace_gpu_memregion(index, nvdev))
                return nvgrace_gpu_write_mem(nvdev, count, ppos, buf);

        if (index == VFIO_PCI_CONFIG_REGION_INDEX)
                return nvgrace_gpu_write_config_emu(core_vdev, buf, count, ppos);

        return vfio_pci_core_write(core_vdev, buf, count, ppos);
}

static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
        .name = "nvgrace-gpu-vfio-pci",
        .init = vfio_pci_core_init_dev,
        .release = vfio_pci_core_release_dev,
        .open_device = nvgrace_gpu_open_device,
        .close_device = nvgrace_gpu_close_device,
        .ioctl = nvgrace_gpu_ioctl,
        .device_feature = vfio_pci_core_ioctl_feature,
        .read = nvgrace_gpu_read,
        .write = nvgrace_gpu_write,
        .mmap = nvgrace_gpu_mmap,
        .request = vfio_pci_core_request,
        .match = vfio_pci_core_match,
        .bind_iommufd = vfio_iommufd_physical_bind,
        .unbind_iommufd = vfio_iommufd_physical_unbind,
        .attach_ioas = vfio_iommufd_physical_attach_ioas,
        .detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
        .name = "nvgrace-gpu-vfio-pci-core",
        .init = vfio_pci_core_init_dev,
        .release = vfio_pci_core_release_dev,
        .open_device = nvgrace_gpu_open_device,
        .close_device = vfio_pci_core_close_device,
        .ioctl = vfio_pci_core_ioctl,
        .device_feature = vfio_pci_core_ioctl_feature,
        .read = vfio_pci_core_read,
        .write = vfio_pci_core_write,
        .mmap = vfio_pci_core_mmap,
        .request = vfio_pci_core_request,
        .match = vfio_pci_core_match,
        .bind_iommufd = vfio_iommufd_physical_bind,
        .unbind_iommufd = vfio_iommufd_physical_unbind,
        .attach_ioas = vfio_iommufd_physical_attach_ioas,
        .detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static int
nvgrace_gpu_fetch_memory_property(struct pci_dev *pdev,
                                  u64 *pmemphys, u64 *pmemlength)
{
        int ret;

        /*
         * The memory information is present in the system ACPI tables as DSD
         * properties nvidia,gpu-mem-base-pa and nvidia,gpu-mem-size.
         */
        ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-base-pa",
                                       pmemphys);
        if (ret)
                return ret;

        if (*pmemphys > type_max(phys_addr_t))
                return -EOVERFLOW;

        ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-size",
                                       pmemlength);
        if (ret)
                return ret;

        if (*pmemlength > type_max(size_t))
                return -EOVERFLOW;

        /*
         * If the C2C link is not up due to an error, the coherent device
         * memory size is returned as 0. Fail in such case.
         */
        if (*pmemlength == 0)
                return -ENOMEM;

        return ret;
}
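
/*
 * Illustrative ACPI fragment (an assumption about how firmware might
 * publish these properties via the standard _DSD device-properties UUID,
 * with made-up values; not taken from this driver):
 *
 *      Name (_DSD, Package () {
 *              ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *              Package () {
 *                      Package () { "nvidia,gpu-mem-base-pa", 0x400000000000 },
 *                      Package () { "nvidia,gpu-mem-size", 0x1800000000 },
 *              }
 *      })
 */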

static int
nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
                              struct nvgrace_gpu_pci_core_device *nvdev,
                              u64 memphys, u64 memlength)
{
        int ret = 0;

        /*
         * The VM GPU device driver needs a non-cacheable region to support
         * the MIG feature. Since the device memory is mapped as NORMAL cached,
         * carve out a region from the end with a different NORMAL_NC
         * property (called reserved memory and represented as resmem). This
         * region is then exposed as a 64b BAR (region 2 and 3) to the VM, while
         * exposing the rest (termed usable memory and represented using usemem)
         * as a cacheable 64b BAR (region 4 and 5).
         *
         *               devmem (memlength)
         * |-------------------------------------------------|
         * |                                           |
         * usemem.memphys                              resmem.memphys
         */
        nvdev->usemem.memphys = memphys;

        /*
         * The device memory exposed to the VM is added to the kernel by the
         * VM driver module in chunks of memory block size. Only the usable
         * memory (usemem) is added to the kernel for usage by the VM
         * workloads. Make the usable memory size memblock aligned.
         */
        if (check_sub_overflow(memlength, RESMEM_SIZE,
                               &nvdev->usemem.memlength)) {
                ret = -EOVERFLOW;
                goto done;
        }

        /*
         * The USEMEM part of the device memory has to be MEMBLK_SIZE
         * aligned. This is a hardwired ABI value between the GPU FW and
         * VFIO driver. The VM device driver is also aware of it and makes
         * use of the value in its calculation to determine the USEMEM size.
         */
        nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
                                             MEMBLK_SIZE);
        if (nvdev->usemem.memlength == 0) {
                ret = -EINVAL;
                goto done;
        }

        if ((check_add_overflow(nvdev->usemem.memphys,
                                nvdev->usemem.memlength,
                                &nvdev->resmem.memphys)) ||
            (check_sub_overflow(memlength, nvdev->usemem.memlength,
                                &nvdev->resmem.memlength))) {
                ret = -EOVERFLOW;
                goto done;
        }

        /*
         * The memory regions are exposed as BARs. Calculate and save
         * the BAR size for them.
         */
        nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
        nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
done:
        return ret;
}
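
/*
 * Continuing the hypothetical 96 GB example from above: usemem.memlength
 * of 95 GB reports a bar_size of roundup_pow_of_two(95 GB) = 128 GB,
 * while resmem.memlength of 1 GB is already a power-of-2 and reports 1 GB.
 */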

static int nvgrace_gpu_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
{
        const struct vfio_device_ops *ops = &nvgrace_gpu_pci_core_ops;
        struct nvgrace_gpu_pci_core_device *nvdev;
        u64 memphys, memlength;
        int ret;

        ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
        if (!ret)
                ops = &nvgrace_gpu_pci_ops;

        nvdev = vfio_alloc_device(nvgrace_gpu_pci_core_device, core_device.vdev,
                                  &pdev->dev, ops);
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);

        dev_set_drvdata(&pdev->dev, &nvdev->core_device);

        if (ops == &nvgrace_gpu_pci_ops) {
                /*
                 * Device memory properties are identified in the host ACPI
                 * table. Set the nvgrace_gpu_pci_core_device structure.
                 */
                ret = nvgrace_gpu_init_nvdev_struct(pdev, nvdev,
                                                    memphys, memlength);
                if (ret)
                        goto out_put_vdev;
        }

        ret = vfio_pci_core_register_device(&nvdev->core_device);
        if (ret)
                goto out_put_vdev;

        return 0;

out_put_vdev:
        vfio_put_device(&nvdev->core_device.vdev);
        return ret;
}

static void nvgrace_gpu_remove(struct pci_dev *pdev)
{
        struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

        vfio_pci_core_unregister_device(core_device);
        vfio_put_device(&core_device->vdev);
}

static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = {
        { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) },
        { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) },
        {}
};

MODULE_DEVICE_TABLE(pci, nvgrace_gpu_vfio_pci_table);

static struct pci_driver nvgrace_gpu_vfio_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = nvgrace_gpu_vfio_pci_table,
        .probe = nvgrace_gpu_probe,
        .remove = nvgrace_gpu_remove,
        .err_handler = &vfio_pci_core_err_handlers,
        .driver_managed_dma = true,
};

module_pci_driver(nvgrace_gpu_vfio_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ankit Agrawal <ankita@nvidia.com>");
MODULE_AUTHOR("Aniket Agashe <aniketa@nvidia.com>");
MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory");