--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Analogix Semiconductor, Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Analogix ANX7625 SlimPort (4K Mobile HD Transmitter)
+
+maintainers:
+ - Xin Ji <xji@analogixsemi.com>
+
+description: |
+ The ANX7625 is an ultra-low power 4K Mobile HD Transmitter
+ designed for portable devices.
+
+properties:
+ compatible:
+ items:
+ - const: analogix,anx7625
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+    description: Interrupt from the chip, pin B8.
+ maxItems: 1
+
+ enable-gpios:
+    description: GPIO used to control chip power-on, POWER_EN pin D2.
+ maxItems: 1
+
+ reset-gpios:
+    description: GPIO used to control chip reset, RESET_N pin B7.
+ maxItems: 1
+
+ ports:
+ type: object
+
+ properties:
+ port@0:
+ type: object
+ description:
+ Video port for MIPI DSI input.
+
+ port@1:
+ type: object
+ description:
+ Video port for panel or connector.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ encoder@58 {
+ compatible = "analogix,anx7625";
+ reg = <0x58>;
+ enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi2dp_bridge_in: port@0 {
+ reg = <0>;
+ anx7625_in: endpoint {
+ remote-endpoint = <&mipi_dsi>;
+ };
+ };
+
+ mipi2dp_bridge_out: port@1 {
+ reg = <1>;
+ anx7625_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+ };
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt36672a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT36672A based DSI display panels
+
+maintainers:
+ - Sumit Semwal <sumit.semwal@linaro.org>
+
+description: |
+  The nt36672a IC from Novatek is a generic DSI panel IC used to drive DSI
+ panels.
+ Right now, support is added only for a Tianma FHD+ LCD display panel with a
+ resolution of 1080x2246. It is a video mode DSI panel.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - tianma,fhd-video
+ - const: novatek,nt36672a
+ description: This indicates the panel manufacturer of the panel that is
+ in turn using the NT36672A panel driver. This compatible string
+ determines how the NT36672A panel driver is configured for the indicated
+ panel. The novatek,nt36672a compatible shall always be provided as a fallback.
+
+ reset-gpios:
+    description: phandle of the GPIO for the reset line. This should be 8 mA; the
+      GPIO can be configured using mux, pinctrl, pinctrl-names (active high).
+
+  vddi0-supply:
+    description: phandle of the regulator that provides the supply voltage
+      to the power IC
+
+ vddpos-supply:
+ description: phandle of the positive boost supply regulator
+
+ vddneg-supply:
+ description: phandle of the negative boost supply regulator
+
+ reg: true
+ port: true
+
+required:
+ - compatible
+ - reg
+ - vddi0-supply
+ - vddpos-supply
+ - vddneg-supply
+ - reset-gpios
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |+
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "tianma,fhd-video", "novatek,nt36672a";
+ reg = <0>;
+ vddi0-supply = <&vreg_l14a_1p88>;
+ vddpos-supply = <&lab>;
+ vddneg-supply = <&ibb>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port {
+ tianma_nt36672a_in_0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+
+...
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
+      # Shanghai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel
+ - tdo,tl070wsh30
reg:
maxItems: 1
backlight: true
enable-gpios: true
+ reset-gpios: true
port: true
power-supply: true
- vxt,vl050-8048nt-c01
# Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
- winstar,wf35ltiacd
+ # Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel
+ - yes-optoelectronics,ytc700tlag-05-201c
backlight: true
enable-gpios: true
- const: vp1
- const: vp2
+ assigned-clocks:
+ minItems: 1
+ maxItems: 3
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 3
+
interrupts:
maxItems: 1
maxItems: 1
description: phandle to the associated power domain
+ dma-coherent:
+ type: boolean
+
ports:
type: object
description:
- const: vp3
- const: vp4
+ assigned-clocks:
+ minItems: 1
+ maxItems: 5
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 5
+
interrupts:
items:
- description: common_m DSS Master common
maxItems: 1
description: phandle to the associated power domain
+ dma-coherent:
+ type: boolean
+
ports:
type: object
description:
description: Trusted Computing Group
"^tcl,.*":
description: Toby Churchill Ltd.
+ "^tdo,.*":
+    description: Shanghai Top Display Optoelectronics Co., Ltd
"^technexion,.*":
description: TechNexion
"^technologic,.*":
description: Shenzhen Xunlong Software CO.,Limited
"^xylon,.*":
description: Xylon
+ "^yes-optoelectronics,.*":
+ description: Yes Optoelectronics Co.,Ltd.
"^ylm,.*":
description: Shenzhen Yangliming Electronic Technology Co., Ltd.
"^yna,.*":
.. kernel-doc:: include/linux/dma-buf.h
:internal:
+Buffer Mapping Helpers
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+ :doc: overview
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+ :internal:
+
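+For example, an importer that already holds a reference to a dma-buf can map it
+into the kernel address space roughly like this (a minimal sketch, error
+handling trimmed)::
+
+    struct dma_buf_map map;
+    int ret;
+
+    ret = dma_buf_vmap(dmabuf, &map);
+    if (ret)
+            return ret;
+
+    /* The union hides whether the backing memory is I/O or system memory. */
+    if (map.is_iomem)
+            memset_io(map.vaddr_iomem, 0, dmabuf->size);
+    else
+            memset(map.vaddr, 0, dmabuf->size);
+
+    dma_buf_vunmap(dmabuf, &map);
+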
Reservation Objects
-------------------
respectively.
When the last reference to a GEM object is released the GEM core calls
-the :c:type:`struct drm_driver <drm_driver>` gem_free_object_unlocked
+the :c:type:`struct drm_gem_object_funcs <gem_object_funcs>` free
operation. That operation is mandatory for GEM-enabled drivers and must
free the GEM object and all associated resources.
-void (\*gem_free_object) (struct drm_gem_object \*obj); Drivers are
+void (\*free) (struct drm_gem_object \*obj); Drivers are
responsible for freeing all GEM object resources. This includes the
resources created by the GEM core, which need to be released with
drm_gem_object_release().
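+
+A bare-bones implementation, using hypothetical foo_* names purely for
+illustration, looks roughly like this::
+
+    static void foo_gem_free_object(struct drm_gem_object *obj)
+    {
+            struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
+
+            /* Release the resources created by the GEM core. */
+            drm_gem_object_release(obj);
+            kfree(foo_obj);
+    }
+
+    static const struct drm_gem_object_funcs foo_gem_object_funcs = {
+            .free = foo_gem_free_object,
+    };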
One issue with the helpers is that they require that drivers handle completion
events for atomic commits correctly. But fixing these bugs is good anyway.
+Somewhat related is the legacy_cursor_update hack, which should be replaced with
+the new atomic_async_check/commit functionality in the helpers in drivers that
+still look at that flag.
+
Contact: Daniel Vetter, respective driver maintainers
Level: Advanced
``unreference_locked`` depending upon context.
Core GEM doesn't have a need for ``struct_mutex`` any more since kernel 4.8,
-and there's a ``gem_free_object_unlocked`` callback for any drivers which are
+and there's a GEM object ``free`` callback for any drivers which are
entirely ``struct_mutex`` free.
For drivers that need ``struct_mutex`` it should be replaced with a driver-
---------------------------
GEM objects can now have a function table instead of having the callbacks on the
-DRM driver struct. This is now the preferred way and drivers can be moved over.
-
-We also need a 2nd version of the CMA define that doesn't require the
-vmapping to be present (different hook for prime importing). Plus this needs to
-be rolled out to all drivers using their own implementations, too.
+DRM driver struct. This is now the preferred way. Callbacks in drivers have been
+converted, except for struct drm_driver.gem_prime_mmap.
Level: Intermediate
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
-- Drop the return code and error checking from all debugfs functions. Greg KH is
- working on this already.
-
Contact: Daniel Vetter
Level: Intermediate
TODO
====
-CRC API Improvements
---------------------
-
-- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
-
-- Use the alpha value to blend vaddr_src with vaddr_dst instead of
- overwriting it in ``blend()``.
-
-- Add igt test to check cleared alpha value for XRGB plane format.
-
-- Add igt test to check extreme alpha values i.e. fully opaque and fully
- transparent (intermediate values are affected by hw-specific rounding modes).
-
-Runtime Configuration
----------------------
-
-We want to be able to reconfigure vkms instance without having to reload the
-module. Use/Test-cases:
-
-- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
- compositors).
+If you want to do any of the items listed below, please share your interest
+with VKMS maintainers.
-- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
- them first).
+Better IGT support
+------------------
-- Change output configuration: Plug/unplug screens, change EDID, allow changing
- the refresh rate.
+- Investigate: (1) test cases on kms_plane that are failing due to timeout on
+  capturing CRC; (2) when running kms_flip test cases in sequence, some test
+  cases that pass individually fail randomly.
-The currently proposed solution is to expose vkms configuration through
-configfs. All existing module options should be supported through configfs too.
+- VKMS already has support for vblanks simulated via hrtimers, which can be
+ tested with kms_flip test; in some way, we can say that VKMS already mimics
+ the real hardware vblank. However, we also have virtual hardware that does
+ not support vblank interrupt and completes page_flip events right away; in
+ this case, compositor developers may end up creating a busy loop on virtual
+ hardware. It would be useful to support Virtual Hardware behavior in VKMS
+ because this can help compositor developers to test their features in
+ multiple scenarios.
Add Plane Features
------------------
- Additional buffer formats, especially YUV formats for video like NV12.
Low/high bpp RGB formats would also be interesting.
-- Async updates (currently only possible on cursor plane using the legacy cursor
- api).
+- Async updates (currently only possible on cursor plane using the legacy
+ cursor api).
+
+For all of these, we also want to review the igt test coverage and make sure
+all relevant igt testcases work on vkms.
+
+Prime Buffer Sharing
+--------------------
-For all of these, we also want to review the igt test coverage and make sure all
-relevant igt testcases work on vkms.
+- Syzbot report - WARNING in vkms_gem_free_object:
+ https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure vkms instance without having to reload the
+module. Use/Test-cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling
+ of compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+ them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+ the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs. All existing module options should be supported through configfs
+too.
Writeback support
-----------------
-Currently vkms only computes a CRC for each frame. Once we have additional plane
-features, we could write back the entire composited frame, and expose it as:
+- The writeback and CRC capture operations share the composer_enabled boolean
+  to ensure vblanks are running. When these operations are used together,
+  composer_enabled probably needs to refcount the composer state to work
+  properly.
-- Writeback connector. This is useful for testing compositors if you don't have
- hardware with writeback support.
+- Add support for cloned writeback outputs and related test cases using a
+ cloned output in the IGT kms_writeback.
- As a v4l device. This is useful for debugging compositors on special vkms
configurations, so that developers see what's really going on.
-Prime Buffer Sharing
---------------------
-
-We already have vgem, which is a gem driver for testing rendering, similar to
-how vkms is for testing the modeset side. Adding buffer sharing support to vkms
-allows us to test them together, to test synchronization and lots of other
-features. Also, this allows compositors to test whether they work correctly on
-SoC chips, where the display and rendering is very often split between 2
-drivers.
-
Output Features
---------------
- Add support for link status, so that compositors can validate their runtime
fallbacks when e.g. a Display Port link goes bad.
-- All the hotplug handling describe under "Runtime Configuration".
+CRC API Improvements
+--------------------
+
+- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
Atomic Check using eBPF
-----------------------
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+DRM DRIVER FOR NOVATEK NT36672A PANELS
+M: Sumit Semwal <sumit.semwal@linaro.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
+M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
-S: Maintained
+S: Orphan
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/fb/
};
};
};
+
+&mali {
+ dma-coherent;
+};
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
attach->dir = direction;
}
+#ifdef CONFIG_DMA_API_DEBUG
+ {
+ struct scatterlist *sg;
+ u64 addr;
+ int len;
+ int i;
+
+ for_each_sgtable_dma_sg(sg_table, sg, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+ pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+ __func__, addr, len);
+ }
+ }
+ }
+#endif /* CONFIG_DMA_API_DEBUG */
+
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
*
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
*
- * Returns NULL on error.
+ * Returns 0 on success, or a negative errno code otherwise.
*/
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
- void *ptr;
+ struct dma_buf_map ptr;
+ int ret = 0;
+
+ dma_buf_map_clear(map);
if (WARN_ON(!dmabuf))
- return NULL;
+ return -EINVAL;
if (!dmabuf->ops->vmap)
- return NULL;
+ return -EINVAL;
mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
- BUG_ON(!dmabuf->vmap_ptr);
- ptr = dmabuf->vmap_ptr;
+ BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
+ *map = dmabuf->vmap_ptr;
goto out_unlock;
}
- BUG_ON(dmabuf->vmap_ptr);
+ BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
- ptr = dmabuf->ops->vmap(dmabuf);
- if (WARN_ON_ONCE(IS_ERR(ptr)))
- ptr = NULL;
- if (!ptr)
+ ret = dmabuf->ops->vmap(dmabuf, &ptr);
+ if (WARN_ON_ONCE(ret))
goto out_unlock;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
+ *map = dmabuf->vmap_ptr;
+
out_unlock:
mutex_unlock(&dmabuf->lock);
- return ptr;
+ return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
- * @vaddr: [in] vmap to vunmap
+ * @map: [in] vmap pointer to vunmap
*/
-void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
if (WARN_ON(!dmabuf))
return;
- BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
- BUG_ON(dmabuf->vmap_ptr != vaddr);
+ BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
- dmabuf->vmap_ptr = NULL;
+ dmabuf->ops->vunmap(dmabuf, map);
+ dma_buf_map_clear(&dmabuf->vmap_ptr);
}
mutex_unlock(&dmabuf->lock);
}
{
struct dma_resv_list *list;
- list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+ list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
if (!list)
return NULL;
return 0;
}
-static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
void *vaddr;
vaddr = dma_heap_buffer_vmap_get(buffer);
mutex_unlock(&buffer->lock);
- return vaddr;
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_DMA_PAGE_POOL
- bool
- depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
- default y
- help
- Choose this if you need the TTM dma page pool
-
config DRM_VRAM_HELPER
tristate
depends on DRM
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
select CRC32
default n
help
}
}
- if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+ if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
amdgpu_bo_fence(bo,
&avm->process_info->eviction_fence->base,
true);
* required.
*/
if (mem->mapped_to_gpu_memory == 0 &&
- !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+ !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
+ !mem->bo->tbo.pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
uint32_t domain;
int r;
- if (bo->pin_count)
+ if (bo->tbo.pin_count)
return 0;
/* Don't move this buffer if we have depleted our allowance
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
+ struct ttm_resource_manager *man;
int r;
r = pm_runtime_get_sync(dev->dev);
return r;
}
- seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ seq_printf(m, "(%d)\n", r);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_abo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
}
unpin:
if (!adev->enable_virtual_display)
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
- DRM_ERROR("failed to unpin new abo in error path\n");
+ amdgpu_bo_unpin(new_abo);
unreserve:
amdgpu_bo_unreserve(new_abo);
struct sg_table *sgt;
long r;
- if (!bo->pin_count) {
+ if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
if (unlikely(ret != 0))
return ret;
- if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (!bo->tbo.pin_count &&
+ (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
.lastclose = amdgpu_driver_lastclose_kms,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
- .gem_free_object_unlocked = amdgpu_gem_object_free,
- .gem_open_object = amdgpu_gem_object_open,
- .gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_vmap = amdgpu_gem_prime_vmap,
- .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
#include "amdgpu.h"
#include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
return r;
}
*obj = &bo->tbo.base;
+ (*obj)->funcs = &amdgpu_gem_object_funcs;
return 0;
}
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
return 0;
}
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
ttm_eu_backoff_reservation(&ticket, &list);
}
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+ .free = amdgpu_gem_object_free,
+ .open = amdgpu_gem_object_open,
+ .close = amdgpu_gem_object_close,
+ .export = amdgpu_gem_prime_export,
+ .vmap = amdgpu_gem_prime_vmap,
+ .vunmap = amdgpu_gem_prime_vunmap,
+};
+
/*
* GEM ioctls.
*/
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- pin_count = READ_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->tbo.pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
-void amdgpu_gem_object_free(struct drm_gem_object *obj);
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
/*
uint64_t *addr, uint64_t *flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_dma_tt *ttm;
switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT:
- ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
- *addr = ttm->dma_address[0];
+ *addr = bo->tbo.ttm->dma_address[0];
break;
case TTM_PL_VRAM:
*addr = amdgpu_bo_gpu_offset(bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct ttm_dma_tt *ttm;
- if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+ if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
- ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
- if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+ if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
return AMDGPU_BO_INVALID_OFFSET;
- return adev->gmc.agp_start + ttm->dma_address[0];
+ return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
/**
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- if (bo->pin_count > 0)
+ if (bo->tbo.pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
amdgpu_bo_kunmap(bo);
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_VRAM;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_TT;
places[c].flags = 0;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = 0;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GDS;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GWS;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_OA;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
- places[c].flags = TTM_PL_MASK_CACHING;
+ places[c].flags = 0;
c++;
}
uint32_t domain;
int r;
- if (bo->pin_count)
+ if (bo->tbo.pin_count)
return 0;
domain = bo->preferred_domains;
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
- if (bo->pin_count) {
+ if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
- bo->pin_count++;
+ ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
u64 domain_start = amdgpu_ttm_domain_start(adev,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
goto error;
}
- bo->pin_count = 1;
+ ttm_bo_pin(&bo->tbo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_operation_ctx ctx = { false, false };
- int r, i;
-
- if (WARN_ON_ONCE(!bo->pin_count)) {
- dev_warn(adev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
+ ttm_bo_unpin(&bo->tbo);
+ if (bo->tbo.pin_count)
+ return;
amdgpu_bo_subtract_pin_size(bo);
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
-
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r))
- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-
- return r;
}
/**
*/
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
+ struct ttm_resource_manager *man;
+
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
if (adev->flags & AMD_IS_APU) {
return 0;
}
#endif
- return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
static const char *amdgpu_vram_names[] = {
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo *abo;
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
unsigned long offset, size;
int r;
- if (!amdgpu_bo_is_amdgpu_bo(bo))
- return 0;
-
- abo = ttm_to_amdgpu_bo(bo);
-
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return 0;
/* Can't move a pinned BO to visible VRAM */
- if (abo->pin_count > 0)
- return -EINVAL;
+ if (abo->tbo.pin_count > 0)
+ return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
abo->placement.busy_placement = &abo->placements[1];
r = ttm_bo_validate(bo, &abo->placement, &ctx);
- if (unlikely(r != 0))
- return r;
+ if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(r))
+ return VM_FAULT_SIGBUS;
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
(offset + size) > adev->gmc.visible_vram_size)
- return -EINVAL;
+ return VM_FAULT_SIGBUS;
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
- !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
+ !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
- unsigned pin_count;
u64 tiling_flags;
u64 metadata_flags;
void *metadata;
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
bool evict,
struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
/* Don't handle scatter gather BOs */
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- struct ttm_dma_tt *dma;
dma_addr_t *dma_address;
- dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
- dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
cpu_addr);
if (r)
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
+ placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
return r;
}
- /* set caching flags */
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
goto out_cleanup;
goto out_cleanup;
}
- /* move BO (in tmp_mem) to new_mem */
- r = ttm_bo_move_ttm(bo, ctx, new_mem);
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
+ placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
}
/* move/bind old memory to GTT space */
- r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
+ return r;
+
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
+ ttm_bo_assign_mem(bo, &tmp_mem);
/* copy to VRAM */
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
struct ttm_resource *old_mem = &bo->mem;
int r;
+ if (new_mem->mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+
/* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->pin_count > 0))
+ if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
return -EINVAL;
adev = amdgpu_ttm_adev(bo->bdev);
ttm_bo_move_null(bo, new_mem);
return 0;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
+
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (r)
+ goto fail;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
+
if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
- return r;
+ goto fail;
}
r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r)
- return r;
+ goto fail;
}
if (bo->type == ttm_bo_type_device &&
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ amdgpu_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return r;
}
/**
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
* TTM backend functions.
*/
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
if (!gtt || !gtt->userptr)
return false;
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+ DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
gart_bind_fail:
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
return r;
}
}
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
gtt->bound = true;
return r;
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
if (r)
- DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
+ DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+ gtt->ttm.num_pages, gtt->offset);
gtt->bound = false;
}
if (gtt->usertask)
put_task_struct(gtt->usertask);
-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
+ enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
}
gtt->gobj = &bo->base;
+ if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
/* allocate space for the uninitialized page entries */
-	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
}
/**
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm_tt_set_populated(ttm);
return 0;
}
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
- }
-#endif
-
- /* fall back to generic helper to populate the page array
- * and map them to the device */
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+ return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
/**
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
return;
adev = amdgpu_ttm_adev(bdev);
-
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
- return;
- }
-#endif
-
- /* fall back to generic helper to unmap and unpopulate array */
- ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm);
+ return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
/**
/* Return false if no part of the ttm_tt object lies within
* the range
*/
- size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
return ret;
}
+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ amdgpu_bo_move_notify(bo, false, NULL);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .ttm_tt_bind = &amdgpu_ttm_backend_bind,
- .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
- .move_notify = &amdgpu_bo_move_notify,
+ .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
- .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
.access_memory = &amdgpu_ttm_access_memory,
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev,
- &amdgpu_bo_driver,
+ r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
+ adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
adev->mman.buffer_funcs_enabled = enable;
}
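+/*
+ * CPU page-fault handler: reserve the BO, let the driver make it CPU
+ * accessible, then hand over to the common TTM fault code to fill in the
+ * page tables.
+ */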
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+ .fault = amdgpu_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+ int r;
- if (adev == NULL)
- return -EINVAL;
+ r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ if (unlikely(r != 0))
+ return r;
- return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ vma->vm_ops = &amdgpu_ttm_vm_ops;
+ return 0;
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
return 0;
}
+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+ {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
};
/**
}
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
- if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
- --count;
-#endif
-
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
return 0;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+ if (bo->pin_count)
return;
abo = ttm_to_amdgpu_bo(bo);
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- struct ttm_dma_tt *ttm;
resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
}
mem = &bo->tbo.mem;
nodes = mem->mm_node;
- if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
- pages_addr = ttm->dma_address;
- }
+ if (mem->mem_type == TTM_PL_TT)
+ pages_addr = bo->tbo.ttm->dma_address;
}
if (bo) {
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
}
static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
}
static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+ crtc);
pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_pipeline *master = kcrtc->master;
return 0;
}
-static int komeda_register_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, komeda_register_show, inode->i_private);
-}
-
-static const struct file_operations komeda_register_fops = {
- .owner = THIS_MODULE,
- .open = komeda_register_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(komeda_register);
#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
goto disable_clk;
}
- dev->dma_parms = &mdev->dma_parms;
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, U32_MAX);
mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
if (!mdev->iommu)
struct device *dev;
/** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
- /** @dma_parms: the dma parameters of komeda */
- struct device_dma_parameters dma_parms;
/** @chip: the basic chip information */
struct komeda_chip_info chip;
}
static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
}
static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
}
static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
}
static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
int err;
}
static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_pending_vblank_event *event;
}
static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
static struct drm_driver armada_drm_driver = {
.lastclose = drm_fb_helper_lastclose,
- .gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
- .gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
.name = "armada-drm",
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
-const struct vm_operations_struct armada_gem_vm_ops = {
+static const struct vm_operations_struct armada_gem_vm_ops = {
.fault = armada_gem_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
return dobj->addr;
}
+static const struct drm_gem_object_funcs armada_gem_object_funcs = {
+ .free = armada_gem_free_object,
+ .export = armada_gem_prime_export,
+ .vm_ops = &armada_gem_vm_ops,
+};
+
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
if (!obj)
return NULL;
+ obj->obj.funcs = &armada_gem_object_funcs;
+
drm_gem_private_object_init(dev, &obj->obj, size);
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
if (!obj)
return NULL;
+ obj->obj.funcs = &armada_gem_object_funcs;
+
if (drm_gem_object_init(dev, &obj->obj, size)) {
kfree(obj);
return NULL;
void *update_data;
};
-extern const struct vm_operations_struct armada_gem_vm_ops;
-
#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
void armada_gem_free_object(struct drm_gem_object *);
tristate "ASPEED BMC Display Controller"
depends on DRM && OF
depends on (COMPILE_TEST || ARCH_ASPEED)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
/* CTRL2 */
#define CRT_CTRL_DAC_EN BIT(0)
#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
-#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(20, 31)
+#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
/* CRT_HORIZ0 */
#define CRT_H_TOTAL(x) (x)
static struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_create_object = drm_gem_cma_create_object_default_funcs,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
{ }
};
+#define ASPEED_SCU_VGA0 0x50
+#define ASPEED_SCU_MISC_CTRL 0x2c
+
+static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 val;
+ int rc;
+
+ rc = kstrtou32(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val > 3)
+ return -EINVAL;
+
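+	/* The DAC mux selection occupies bits [17:16] of the SCU misc control. */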
+ rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
+ if (rc < 0)
+		return rc;
+
+ return count;
+}
+
+static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int rc;
+
+	rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%u\n", (reg >> 16) & 0x3);
+}
+static DEVICE_ATTR_RW(dac_mux);
+
+static ssize_t
+vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int rc;
+
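+	/* vga_pw reports bit 0 of the SCU VGA scratch register. */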
+	rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%u\n", reg & 1);
+}
+static DEVICE_ATTR_RO(vga_pw);
+
+static struct attribute *aspeed_sysfs_entries[] = {
+ &dev_attr_vga_pw.attr,
+ &dev_attr_dac_mux.attr,
+ NULL,
+};
+
+static struct attribute_group aspeed_sysfs_attr_group = {
+ .attrs = aspeed_sysfs_entries,
+};
+
static int aspeed_gfx_probe(struct platform_device *pdev)
{
struct aspeed_gfx *priv;
if (ret)
return ret;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+ if (ret)
+ return ret;
+
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
return 0;
err_unload:
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
aspeed_gfx_unload(&priv->drm);
return ret;
{
struct drm_device *drm = platform_get_drvdata(pdev);
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
case DRM_MODE_DPMS_SUSPEND:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
return 0;
}
+static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+ struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+ struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (old_ast_crtc_state->format != ast_crtc_state->format)
+ ast_crtc_load_lut(ast, crtc);
+}
+
static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
static void
ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
}
static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
}
static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
return 0;
}
-int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
struct adv7511 *adv7511 = dev_get_drvdata(dev);
unsigned int audio_source, i2s_format = 0;
config DRM_ANALOGIX_DP
tristate
depends on DRM
+
+config DRM_ANALOGIX_ANX7625
+ tristate "Analogix Anx7625 MIPI to DP interface support"
+ depends on DRM
+ depends on OF
+ help
+ ANX7625 is an ultra-low power 4K mobile HD transmitter
+ designed for portable devices. It converts MIPI/DPI to
+	  DisplayPort 1.3 4K.
# SPDX-License-Identifier: GPL-2.0-only
analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += anx7625.o
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
}
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
{
int reg;
int retval = 0;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+#include <linux/gcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+
+#include "anx7625.h"
+
+/*
+ * There is a sync issue when accessing I2C registers between the AP (CPU) and
+ * the internal firmware (OCM). To avoid the race condition, the AP should
+ * access the reserved slave address before the slave address changes.
+ */
+static int i2c_access_workaround(struct anx7625_data *ctx,
+ struct i2c_client *client)
+{
+ u8 offset;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (client == ctx->last_client)
+ return 0;
+
+ ctx->last_client = client;
+
+ if (client == ctx->i2c.tcpc_client)
+ offset = RSVD_00_ADDR;
+ else if (client == ctx->i2c.tx_p0_client)
+ offset = RSVD_D1_ADDR;
+ else if (client == ctx->i2c.tx_p1_client)
+ offset = RSVD_60_ADDR;
+ else if (client == ctx->i2c.rx_p0_client)
+ offset = RSVD_39_ADDR;
+ else if (client == ctx->i2c.rx_p1_client)
+ offset = RSVD_7F_ADDR;
+ else
+ offset = RSVD_00_ADDR;
+
+ ret = i2c_smbus_write_byte_data(client, offset, 0x00);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev,
+			      "fail to access i2c id=%x:%x\n",
+ client->addr, offset);
+
+ return ret;
+}
+
+static int anx7625_reg_read(struct anx7625_data *ctx,
+ struct i2c_client *client, u8 reg_addr)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_read_byte_data(client, reg_addr);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "read i2c fail id=%x:%x\n",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_reg_block_read(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 reg_addr, u8 len, u8 *buf)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "read i2c block fail id=%x:%x\n",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_reg_write(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 reg_addr, u8 reg_val)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_write_byte_data(client, reg_addr, reg_val);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "fail to write i2c id=%x:%x\n",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_write_or(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client, offset, (val | (mask)));
+}
+
+static int anx7625_write_and(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client, offset, (val & (mask)));
+}
+
+static int anx7625_write_and_or(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 and_mask, u8 or_mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client,
+ offset, (val & and_mask) | (or_mask));
+}
+
+static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS);
+}
+
+static int wait_aux_op_finish(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int val;
+ int ret;
+
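+ /* Poll AP_AUX_CTRL_STATUS every 2ms, for up to 300ms, until OP_EN clears. */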
+ ret = readx_poll_timeout(anx7625_read_ctrl_status_p0,
+ ctx, val,
+ (!(val & AP_AUX_CTRL_OP_EN) || (val < 0)),
+ 2000,
+ 2000 * 150);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "aux operation fail!\n");
+ return -EIO;
+ }
+
+ val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS);
+ if (val < 0 || (val & 0x0F)) {
+ DRM_DEV_ERROR(dev, "aux status %02x\n", val);
+ val = -EIO;
+ }
+
+ return val;
+}
+
+static int anx7625_video_mute_control(struct anx7625_data *ctx,
+ u8 status)
+{
+ int ret;
+
+ if (status) {
+ /* Set mute on flag */
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_MUTE);
+ /* Clear mipi RX en */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_RX_EN);
+ } else {
+ /* Mute off flag */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+ /* Set MIPI RX EN */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_RX_EN);
+ }
+
+ return ret;
+}
+
+static int anx7625_config_audio_input(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ /* Channel num */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5);
+
+ /* FS */
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_4,
+ 0xf0, AUDIO_FS_48K);
+ /* Word length */
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_5,
+ 0xf0, AUDIO_W_LEN_24_24MAX);
+ /* I2S */
+ ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE);
+ /* Audio change flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_AUDIO_CHG);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "fail to config audio.\n");
+
+ return ret;
+}
+
+/* Reduction of fraction a/b */
+static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b)
+{
+ unsigned long gcd_num;
+ unsigned long tmp_a, tmp_b;
+ u32 i = 1;
+
+ gcd_num = gcd(*a, *b);
+ *a /= gcd_num;
+ *b /= gcd_num;
+
+ tmp_a = *a;
+ tmp_b = *b;
+
+ while ((*a > MAX_UNSIGNED_24BIT) || (*b > MAX_UNSIGNED_24BIT)) {
+ i++;
+ *a = tmp_a / i;
+ *b = tmp_b / i;
+ }
+
+ /*
+ * In the end, make a, b larger to have higher ODFC PLL
+ * output frequency accuracy
+ */
+ while ((*a < MAX_UNSIGNED_24BIT) && (*b < MAX_UNSIGNED_24BIT)) {
+ *a <<= 1;
+ *b <<= 1;
+ }
+
+ *a >>= 1;
+ *b >>= 1;
+}
+
+static int anx7625_calculate_m_n(u32 pixelclock,
+ unsigned long *m,
+ unsigned long *n,
+ u8 *post_divider)
+{
+ if (pixelclock > PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN) {
+ /* Pixel clock frequency is too high */
+ DRM_ERROR("pixelclock too high, act(%d), maximum(%lu)\n",
+ pixelclock,
+ PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN);
+ return -EINVAL;
+ }
+
+ if (pixelclock < PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX) {
+ /* Pixel clock frequency is too low */
+ DRM_ERROR("pixelclock too low, act(%d), minimum(%lu)\n",
+ pixelclock,
+ PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX);
+ return -EINVAL;
+ }
+
+ for (*post_divider = 1;
+ pixelclock < (PLL_OUT_FREQ_MIN / (*post_divider));)
+ *post_divider += 1;
+
+ if (*post_divider > POST_DIVIDER_MAX) {
+ for (*post_divider = 1;
+ (pixelclock <
+ (PLL_OUT_FREQ_ABS_MIN / (*post_divider)));)
+ *post_divider += 1;
+
+ if (*post_divider > POST_DIVIDER_MAX) {
+ DRM_ERROR("cannot find proper post_divider(%d)\n",
+ *post_divider);
+ return -EDOM;
+ }
+ }
+
+ /* Patch to improve the accuracy */
+ if (*post_divider == 7) {
+ /* 27,000,000 is not divisible by 7 */
+ *post_divider = 8;
+ } else if (*post_divider == 11) {
+ /* 27,000,000 is not divisible by 11 */
+ *post_divider = 12;
+ } else if ((*post_divider == 13) || (*post_divider == 14)) {
+ /* 27,000,000 is not divisible by 13 or 14 */
+ *post_divider = 15;
+ }
+
+ if (pixelclock * (*post_divider) > PLL_OUT_FREQ_ABS_MAX) {
+ DRM_ERROR("act clock(%u) larger than maximum(%lu)\n",
+ pixelclock * (*post_divider),
+ PLL_OUT_FREQ_ABS_MAX);
+ return -EDOM;
+ }
+
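+ /*
+ * The target ODFC PLL output is pixelclock * post_divider; since
+ * n = XTAL_FRQ / post_divider, m / n equals that target divided by the
+ * 27MHz crystal. For example, a 148.5MHz pixel clock picks
+ * post_divider 4, giving a 594MHz PLL output.
+ */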
+ *m = pixelclock;
+ *n = XTAL_FRQ / (*post_divider);
+
+ anx7625_reduction_of_a_fraction(m, n);
+
+ return 0;
+}
+
+static int anx7625_odfc_config(struct anx7625_data *ctx,
+ u8 post_divider)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Config input reference clock frequency 27MHz/19.2MHz */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+ ~(REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+ (REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+ /* Post divider */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_PLL_8, 0x0f);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8,
+ post_divider << 4);
+
+ /* Add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test) */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ ~MIPI_PLL_VCO_TUNE_REG_VAL);
+
+ /* Reset ODFC PLL */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ ~MIPI_PLL_RESET_N);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ MIPI_PLL_RESET_N);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error.\n");
+
+ return ret;
+}
+
+static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ unsigned long m, n;
+ u16 htotal;
+ int ret;
+ u8 post_divider = 0;
+
+ ret = anx7625_calculate_m_n(ctx->dt.pixelclock.min * 1000,
+ &m, &n, &post_divider);
+
+ if (ret) {
+ DRM_DEV_ERROR(dev, "cannot get proper m/n values.\n");
+ return ret;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "compute M(%lu), N(%lu), divider(%d).\n",
+ m, n, post_divider);
+
+ /* Configure pixel clock */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L,
+ (ctx->dt.pixelclock.min / 1000) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H,
+ (ctx->dt.pixelclock.min / 1000) >> 8);
+ /* Lane count */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_0, 0xfc);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_0, 3);
+
+ /* Htotal */
+ htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min +
+ ctx->dt.hback_porch.min + ctx->dt.hsync_len.min;
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_TOTAL_PIXELS_L, htotal & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_TOTAL_PIXELS_H, htotal >> 8);
+ /* Hactive */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_ACTIVE_PIXELS_L, ctx->dt.hactive.min & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_ACTIVE_PIXELS_H, ctx->dt.hactive.min >> 8);
+ /* HFP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_FRONT_PORCH_L, ctx->dt.hfront_porch.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_FRONT_PORCH_H,
+ ctx->dt.hfront_porch.min >> 8);
+ /* HWS */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_SYNC_WIDTH_L, ctx->dt.hsync_len.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_SYNC_WIDTH_H, ctx->dt.hsync_len.min >> 8);
+ /* HBP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_BACK_PORCH_L, ctx->dt.hback_porch.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_BACK_PORCH_H, ctx->dt.hback_porch.min >> 8);
+ /* Vactive */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_L,
+ ctx->dt.vactive.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_H,
+ ctx->dt.vactive.min >> 8);
+ /* VFP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_FRONT_PORCH, ctx->dt.vfront_porch.min);
+ /* VWS */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_SYNC_WIDTH, ctx->dt.vsync_len.min);
+ /* VBP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_BACK_PORCH, ctx->dt.vback_porch.min);
+ /* M value */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_23_16, (m >> 16) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_15_8, (m >> 8) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_7_0, (m & 0xff));
+ /* N value */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_N_NUM_23_16, (n >> 16) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0,
+ (n & 0xff));
+ /* Diff */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_ADJ_1, 0x3D);
+
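+ /* The post divider register field encodes "divide by n + 1", hence the -1. */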
+ ret |= anx7625_odfc_config(ctx, post_divider - 1);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "mipi dsi setup IO error.\n");
+
+ return ret;
+}
+
+static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx)
+{
+ int val;
+ struct device *dev = &ctx->client->dev;
+
+ /* Swap MIPI-DSI data lane 3 P and N */
+ val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP);
+ if (val < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access MIPI_SWAP.\n");
+ return -EIO;
+ }
+
+ val |= (1 << MIPI_SWAP_CH3);
+ return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP, val);
+}
+
+static int anx7625_api_dsi_config(struct anx7625_data *ctx)
+
+{
+ int val, ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Swap MIPI-DSI data lane 3 P and N */
+ ret = anx7625_swap_dsi_lane3(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : swap dsi lane 3 fail.\n");
+ return ret;
+ }
+
+ /* DSI clock settings */
+ val = (0 << MIPI_HS_PWD_CLK) |
+ (0 << MIPI_HS_RT_CLK) |
+ (0 << MIPI_PD_CLK) |
+ (1 << MIPI_CLK_RT_MANUAL_PD_EN) |
+ (1 << MIPI_CLK_HS_MANUAL_PD_EN) |
+ (0 << MIPI_CLK_DET_DET_BYPASS) |
+ (0 << MIPI_CLK_MISS_CTRL) |
+ (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN);
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PHY_CONTROL_3, val);
+
+ /*
+ * Decrease the HS prepare timing delay from 160ns to 80ns to work with
+ * a) Dragon board 810 series (Qualcomm AP)
+ * b) Moving Pixel DSI source (PG3A pattern generator +
+ * P332 D-PHY Probe) default D-PHY timing
+ * 5ns/step
+ */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_TIME_HS_PRPR, 0x10);
+
+ /* Enable DSI mode */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18,
+ SELECT_DSI << MIPI_DPI_SELECT);
+
+ ret |= anx7625_dsi_video_timing_config(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "dsi video timing config fail\n");
+ return ret;
+ }
+
+ /* Toggle m, n ready */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+ ~(MIPI_M_NUM_READY | MIPI_N_NUM_READY));
+ usleep_range(1000, 1100);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+ MIPI_M_NUM_READY | MIPI_N_NUM_READY);
+
+ /* Configure integer stable register */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_VIDEO_STABLE_CNT, 0x02);
+ /* Power on MIPI RX */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_10, 0x00);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_10, 0x80);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : mipi dsi enable init fail.\n");
+
+ return ret;
+}
+
+static int anx7625_dsi_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n");
+
+ /* DSC disable */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ R_DSC_CTRL_0, ~DSC_EN);
+
+ ret |= anx7625_api_dsi_config(ctx);
+
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : api dsi config error.\n");
+ return ret;
+ }
+
+ /* Set MIPI RX EN */
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_RX_EN);
+ /* Clear mute flag */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : enable mipi rx fail.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "success to config DSI\n");
+
+ return ret;
+}
+
+static void anx7625_dp_start(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->display_timing_valid) {
+ DRM_DEV_ERROR(dev, "display timing not set by mipi yet.\n");
+ return;
+ }
+
+ anx7625_config_audio_input(ctx);
+
+ ret = anx7625_dsi_config(ctx);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");
+}
+
+static void anx7625_dp_stop(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");
+
+ /*
+ * Video disable: 0x72:08 bit 7 = 0;
+ * Audio disable: 0x70:87 bit 0 = 0;
+ */
+ ret = anx7625_write_and(ctx, ctx->i2c.tx_p0_client, 0x87, 0xfe);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);
+
+ ret |= anx7625_video_mute_control(ctx, 1);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+}
+
+static int sp_tx_rst_aux(struct anx7625_data *ctx)
+{
+ int ret;
+
+ ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+ AUX_RST);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+ ~AUX_RST);
+ return ret;
+}
+
+static int sp_tx_aux_wr(struct anx7625_data *ctx, u8 offset)
+{
+ int ret;
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START, offset);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, 0x04);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+ return (ret | wait_aux_op_finish(ctx));
+}
+
+static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
+{
+ int ret;
+
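+ /*
+ * len_cmd packs the AUX command in the low nibble and the transfer
+ * length minus one in the high nibble (0xf1 thus requests a 16-byte
+ * I2C read, matching the callers below).
+ */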
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, len_cmd);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+ return (ret | wait_aux_op_finish(ctx));
+}
+
+static int sp_tx_get_edid_block(struct anx7625_data *ctx)
+{
+ int c = 0;
+ struct device *dev = &ctx->client->dev;
+
+ sp_tx_aux_wr(ctx, 0x7e);
+ sp_tx_aux_rd(ctx, 0x01);
+ c = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START);
+ if (c < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access AUX BUFF.\n");
+ return -EIO;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, " EDID Block = %d\n", c + 1);
+
+ if (c > MAX_EDID_BLOCK)
+ c = 1;
+
+ return c;
+}
+
+static int edid_read(struct anx7625_data *ctx,
+ u8 offset, u8 *pblock_buf)
+{
+ int ret, cnt;
+ struct device *dev = &ctx->client->dev;
+
+ for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+ sp_tx_aux_wr(ctx, offset);
+ /* Issue I2C read command 0x01 (MOT = 0) and read 16 bytes */
+ ret = sp_tx_aux_rd(ctx, 0xf1);
+
+ if (ret) {
+ sp_tx_rst_aux(ctx);
+ DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
+ } else {
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START,
+ MAX_DPCD_BUFFER_SIZE,
+ pblock_buf);
+ if (ret > 0)
+ break;
+ }
+ }
+
+ if (cnt > EDID_TRY_CNT)
+ return -EIO;
+
+ return 0;
+}
+
+static int segments_edid_read(struct anx7625_data *ctx,
+ u8 segment, u8 *buf, u8 offset)
+{
+ u8 cnt;
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Write address only */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x30);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, 0x04);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS,
+ AP_AUX_CTRL_ADDRONLY | AP_AUX_CTRL_OP_EN);
+
+ ret |= wait_aux_op_finish(ctx);
+ /* Write segment address */
+ ret |= sp_tx_aux_wr(ctx, segment);
+ /* Data read */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x50);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "IO error : aux initial fail.\n");
+ return ret;
+ }
+
+ for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+ sp_tx_aux_wr(ctx, offset);
+ /* Issue I2C read command 0x01 (MOT = 0) and read 16 bytes */
+ ret = sp_tx_aux_rd(ctx, 0xf1);
+
+ if (ret) {
+ ret = sp_tx_rst_aux(ctx);
+ DRM_DEV_ERROR(dev, "segment read fail, reset!\n");
+ } else {
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START,
+ MAX_DPCD_BUFFER_SIZE, buf);
+ if (ret > 0)
+ break;
+ }
+ }
+
+ if (cnt > EDID_TRY_CNT)
+ return -EIO;
+
+ return 0;
+}
+
+static int sp_tx_edid_read(struct anx7625_data *ctx,
+ u8 *pedid_blocks_buf)
+{
+ u8 offset, edid_pos;
+ int count, blocks_num;
+ u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
+ u8 i, j;
+ u8 g_edid_break = 0;
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Address initial */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x50);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_15_8, 0);
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_19_16, 0xf0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "access aux channel IO error.\n");
+ return -EIO;
+ }
+
+ blocks_num = sp_tx_get_edid_block(ctx);
+ if (blocks_num < 0)
+ return blocks_num;
+
+ count = 0;
+
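+ /*
+ * EDID blocks 0 and 1 are read directly from DDC address 0x50;
+ * blocks 2 and 3 require the E-DDC segment pointer (segment 1)
+ * to be written first.
+ */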
+ do {
+ switch (count) {
+ case 0:
+ case 1:
+ for (i = 0; i < 8; i++) {
+ offset = (i + count * 8) * MAX_DPCD_BUFFER_SIZE;
+ g_edid_break = edid_read(ctx, offset,
+ pblock_buf);
+
+ if (g_edid_break)
+ break;
+
+ memcpy(&pedid_blocks_buf[offset],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ }
+
+ break;
+ case 2:
+ offset = 0x00;
+
+ for (j = 0; j < 8; j++) {
+ edid_pos = (j + count * 8) *
+ MAX_DPCD_BUFFER_SIZE;
+
+ if (g_edid_break == 1)
+ break;
+
+ segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ memcpy(&pedid_blocks_buf[edid_pos],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ offset = offset + 0x10;
+ }
+
+ break;
+ case 3:
+ offset = 0x80;
+
+ for (j = 0; j < 8; j++) {
+ edid_pos = (j + count * 8) *
+ MAX_DPCD_BUFFER_SIZE;
+ if (g_edid_break == 1)
+ break;
+
+ segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ memcpy(&pedid_blocks_buf[edid_pos],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ offset = offset + 0x10;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ count++;
+
+ } while (blocks_num >= count);
+
+ /* Check edid data */
+ if (!drm_edid_is_valid((struct edid *)pedid_blocks_buf)) {
+ DRM_DEV_ERROR(dev, "WARNING! edid check fail!\n");
+ return -EINVAL;
+ }
+
+ /* Reset aux channel */
+ sp_tx_rst_aux(ctx);
+
+ return (blocks_num + 1);
+}
+
+static void anx7625_power_on(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->pdata.low_power_mode) {
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+ return;
+ }
+
+ /* Power on pin enable */
+ gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ usleep_range(10000, 11000);
+ /* Power reset pin enable */
+ gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ usleep_range(10000, 11000);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
+}
+
+static void anx7625_power_standby(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->pdata.low_power_mode) {
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+ return;
+ }
+
+ gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ usleep_range(1000, 1100);
+ DRM_DEV_DEBUG_DRIVER(dev, "power down\n");
+}
+
+/* Basic configurations of ANX7625 */
+static void anx7625_config(struct anx7625_data *ctx)
+{
+ anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ XTAL_FRQ_SEL, XTAL_FRQ_27M);
+}
+
+static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ /* Reset main ocm */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
+ /* Disable PD */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_DISABLE_PD);
+ /* Release main ocm */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x00);
+
+ if (ret < 0)
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+}
+
+static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Check interface workable */
+ ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ FLASH_LOAD_STA);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access flash load.\n");
+ return ret;
+ }
+ if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
+ return -ENODEV;
+
+ anx7625_disable_pd_protocol(ctx);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,",
+ anx7625_reg_read(ctx,
+ ctx->i2c.rx_p0_client,
+ OCM_FW_VERSION),
+ anx7625_reg_read(ctx,
+ ctx->i2c.rx_p0_client,
+ OCM_FW_REVERSION));
+ DRM_DEV_DEBUG_DRIVER(dev, "Driver version %s\n",
+ ANX7625_DRV_VERSION);
+
+ return 0;
+}
+
+static void anx7625_power_on_init(struct anx7625_data *ctx)
+{
+ int retry_count, i;
+
+ for (retry_count = 0; retry_count < 3; retry_count++) {
+ anx7625_power_on(ctx);
+ anx7625_config(ctx);
+
+ for (i = 0; i < OCM_LOADING_TIME; i++) {
+ if (!anx7625_ocm_loading_check(ctx))
+ return;
+ usleep_range(1000, 1100);
+ }
+ anx7625_power_standby(ctx);
+ }
+}
+
+static void anx7625_chip_control(struct anx7625_data *ctx, int state)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "before set, power_state(%d).\n",
+ atomic_read(&ctx->power_status));
+
+ if (!ctx->pdata.low_power_mode)
+ return;
+
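+ /* power_status acts as a usage count: the first user powers the chip up, the last one powers it down. */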
+ if (state) {
+ atomic_inc(&ctx->power_status);
+ if (atomic_read(&ctx->power_status) == 1)
+ anx7625_power_on_init(ctx);
+ } else {
+ if (atomic_read(&ctx->power_status)) {
+ atomic_dec(&ctx->power_status);
+
+ if (atomic_read(&ctx->power_status) == 0)
+ anx7625_power_standby(ctx);
+ }
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "after set, power_state(%d).\n",
+ atomic_read(&ctx->power_status));
+}
+
+static void anx7625_init_gpio(struct anx7625_data *platform)
+{
+ struct device *dev = &platform->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
+
+ /* Gpio for chip power enable */
+ platform->pdata.gpio_p_on =
+ devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ /* Gpio for chip reset */
+ platform->pdata.gpio_reset =
+ devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+
+ if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
+ platform->pdata.low_power_mode = 1;
+ DRM_DEV_DEBUG_DRIVER(dev, "low power mode, pon %d, reset %d.\n",
+ desc_to_gpio(platform->pdata.gpio_p_on),
+ desc_to_gpio(platform->pdata.gpio_reset));
+ } else {
+ platform->pdata.low_power_mode = 0;
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode.\n");
+ }
+}
+
+static void anx7625_stop_dp_work(struct anx7625_data *ctx)
+{
+ ctx->hpd_status = 0;
+ ctx->hpd_high_cnt = 0;
+ ctx->display_timing_valid = 0;
+
+ if (ctx->pdata.low_power_mode == 0)
+ anx7625_disable_pd_protocol(ctx);
+}
+
+static void anx7625_start_dp_work(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ if (ctx->hpd_high_cnt >= 2) {
+ DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
+ return;
+ }
+
+ ctx->hpd_high_cnt++;
+
+ /* HDCP is not supported */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+
+ /* Try auth flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+ /* Interrupt for DRM */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+ if (ret < 0)
+ return;
+
+ ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
+ if (ret < 0)
+ return;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
+}
+
+static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+}
+
+static void anx7625_hpd_polling(struct anx7625_data *ctx)
+{
+ int ret, val;
+ struct device *dev = &ctx->client->dev;
+
+ if (atomic_read(&ctx->power_status) != 1) {
+ DRM_DEV_DEBUG_DRIVER(dev, "No need to poll HPD status.\n");
+ return;
+ }
+
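+ /* Poll the HPD status every 5ms, for up to 500ms. */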
+ ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
+ ctx, val,
+ ((val & HPD_STATUS) || (val < 0)),
+ 5000,
+ 5000 * 100);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "HPD polling timeout!\n");
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, "HPD raised.\n");
+ anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+ INTR_ALERT_1, 0xFF);
+ anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT, 0);
+ }
+
+ anx7625_start_dp_work(ctx);
+}
+
+static void anx7625_disconnect_check(struct anx7625_data *ctx)
+{
+ if (atomic_read(&ctx->power_status) == 0)
+ anx7625_stop_dp_work(ctx);
+}
+
+static void anx7625_low_power_mode_check(struct anx7625_data *ctx,
+ int state)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "low power mode check, state(%d).\n", state);
+
+ if (ctx->pdata.low_power_mode) {
+ anx7625_chip_control(ctx, state);
+ if (state)
+ anx7625_hpd_polling(ctx);
+ else
+ anx7625_disconnect_check(ctx);
+ }
+}
+
+static void anx7625_remove_edid(struct anx7625_data *ctx)
+{
+ ctx->slimport_edid_p.edid_block_num = -1;
+}
+
+static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
+{
+ struct device *dev = &ctx->client->dev;
+
+ /* HPD changed */
+ DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_handler: %d\n",
+ (u32)on);
+
+ if (on == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, " HPD low\n");
+ anx7625_remove_edid(ctx);
+ anx7625_stop_dp_work(ctx);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n");
+ anx7625_start_dp_work(ctx);
+ }
+
+ ctx->hpd_status = 1;
+}
+
+static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
+{
+ int intr_vector, status;
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "power_status=%d\n",
+ (u32)atomic_read(&ctx->power_status));
+
+ status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+ INTR_ALERT_1, 0xFF);
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot clear alert reg.\n");
+ return status;
+ }
+
+ intr_vector = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT);
+ if (intr_vector < 0) {
+ DRM_DEV_ERROR(dev, "cannot access interrupt change reg.\n");
+ return intr_vector;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x44=%x\n", intr_vector);
+ status = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT,
+ intr_vector & (~intr_vector));
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot clear interrupt change reg.\n");
+ return status;
+ }
+
+ if (!(intr_vector & HPD_STATUS_CHANGE))
+ return -ENOENT;
+
+ status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ SYSTEM_STSTUS);
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot read system status reg.\n");
+ return status;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status);
+ dp_hpd_change_handler(ctx, status & HPD_STATUS);
+
+ return 0;
+}
+
+static void anx7625_work_func(struct work_struct *work)
+{
+ int event;
+ struct anx7625_data *ctx = container_of(work,
+ struct anx7625_data, work);
+
+ mutex_lock(&ctx->lock);
+ event = anx7625_hpd_change_detect(ctx);
+ mutex_unlock(&ctx->lock);
+ if (event < 0)
+ return;
+
+ if (ctx->bridge_attached)
+ drm_helper_hpd_irq_event(ctx->bridge.dev);
+}
+
+static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
+{
+ struct anx7625_data *ctx = (struct anx7625_data *)data;
+
+ if (atomic_read(&ctx->power_status) != 1)
+ return IRQ_NONE;
+
+ queue_work(ctx->workqueue, &ctx->work);
+
+ return IRQ_HANDLED;
+}
+
+static int anx7625_parse_dt(struct device *dev,
+ struct anx7625_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ struct drm_panel *panel;
+ int ret;
+
+ pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
+ if (!pdata->mipi_host_node) {
+ DRM_DEV_ERROR(dev, "fail to get mipi dsi host node.\n");
+ return -ENODEV;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n");
+
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0) {
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
+ if (!panel)
+ return -ENODEV;
+
+ pdata->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(pdata->panel_bridge))
+ return PTR_ERR(pdata->panel_bridge);
+ DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n");
+
+ return 0;
+}
+
+static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct anx7625_data, bridge);
+}
+
+static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ struct s_edid_data *p_edid = &ctx->slimport_edid_p;
+ int edid_num;
+ u8 *edid;
+
+ edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
+ return NULL;
+ }
+
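+ /* Return a copy of the cached EDID if a previous read already populated it. */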
+ if (ctx->slimport_edid_p.edid_block_num > 0) {
+ memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
+ FOUR_BLOCK_SIZE);
+ return (struct edid *)edid;
+ }
+
+ anx7625_low_power_mode_check(ctx, 1);
+ edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
+ anx7625_low_power_mode_check(ctx, 0);
+
+ if (edid_num < 1) {
+ DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
+ kfree(edid);
+ return NULL;
+ }
+
+ p_edid->edid_block_num = edid_num;
+
+ memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);
+ return (struct edid *)edid;
+}
+
+static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n");
+
+ return connector_status_connected;
+}
+
+static int anx7625_attach_dsi(struct anx7625_data *ctx)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = &ctx->client->dev;
+ struct mipi_dsi_host *host;
+ const struct mipi_dsi_device_info info = {
+ .type = "anx7625",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
+
+ host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+ if (!host) {
+ DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+ return -EINVAL;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ DRM_DEV_ERROR(dev, "fail to create dsi device.\n");
+ return -EINVAL;
+ }
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ if (mipi_dsi_attach(dsi) < 0) {
+ DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
+ mipi_dsi_device_unregister(dsi);
+ return -EINVAL;
+ }
+
+ ctx->dsi = dsi;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");
+
+ return 0;
+}
+
+static void anx7625_bridge_detach(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+
+ if (ctx->dsi) {
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+ }
+}
+
+static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ int err;
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(dev, "Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ err = anx7625_attach_dsi(ctx);
+ if (err) {
+ DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err);
+ return err;
+ }
+
+ if (ctx->pdata.panel_bridge) {
+ err = drm_bridge_attach(bridge->encoder,
+ ctx->pdata.panel_bridge,
+ &ctx->bridge, flags);
+ if (err) {
+ DRM_DEV_ERROR(dev,
+ "Fail to attach panel bridge: %d\n", err);
+ return err;
+ }
+ }
+
+ ctx->bridge_attached = 1;
+
+ return 0;
+}
+
+static enum drm_mode_status
+anx7625_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");
+
+ /* Max 1200p at 5.4GHz, one lane, pixel clock 300MHz */
+ if (mode->clock > SUPPORT_PIXEL_CLOCK) {
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "drm mode invalid, pixelclock too high.\n");
+ return MODE_CLOCK_HIGH;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode valid.\n");
+
+ return MODE_OK;
+}
+
+static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *old_mode,
+ const struct drm_display_mode *mode)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
+
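+ /* Only the .min fields of the display_timing ranges are used; mode->clock is in kHz. */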
+ ctx->dt.pixelclock.min = mode->clock;
+ ctx->dt.hactive.min = mode->hdisplay;
+ ctx->dt.hsync_len.min = mode->hsync_end - mode->hsync_start;
+ ctx->dt.hfront_porch.min = mode->hsync_start - mode->hdisplay;
+ ctx->dt.hback_porch.min = mode->htotal - mode->hsync_end;
+ ctx->dt.vactive.min = mode->vdisplay;
+ ctx->dt.vsync_len.min = mode->vsync_end - mode->vsync_start;
+ ctx->dt.vfront_porch.min = mode->vsync_start - mode->vdisplay;
+ ctx->dt.vback_porch.min = mode->vtotal - mode->vsync_end;
+
+ ctx->display_timing_valid = 1;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "pixelclock(%d).\n", ctx->dt.pixelclock.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "hactive(%d), hsync(%d), hfp(%d), hbp(%d)\n",
+ ctx->dt.hactive.min,
+ ctx->dt.hsync_len.min,
+ ctx->dt.hfront_porch.min,
+ ctx->dt.hback_porch.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "vactive(%d), vsync(%d), vfp(%d), vbp(%d)\n",
+ ctx->dt.vactive.min,
+ ctx->dt.vsync_len.min,
+ ctx->dt.vfront_porch.min,
+ ctx->dt.vback_porch.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "hdisplay(%d),hsync_start(%d).\n",
+ mode->hdisplay,
+ mode->hsync_start);
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_end(%d),htotal(%d).\n",
+ mode->hsync_end,
+ mode->htotal);
+ DRM_DEV_DEBUG_DRIVER(dev, "vdisplay(%d),vsync_start(%d).\n",
+ mode->vdisplay,
+ mode->vsync_start);
+ DRM_DEV_DEBUG_DRIVER(dev, "vsync_end(%d),vtotal(%d).\n",
+ mode->vsync_end,
+ mode->vtotal);
+}
+
+static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+ u32 hsync, hfp, hbp, hblanking;
+ u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
+ u32 vref, adj_clock;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");
+
+ hsync = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hblanking = mode->htotal - mode->hdisplay;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "before mode fixup\n");
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+ hsync, hfp, hbp, adj->clock);
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+ adj->hsync_start, adj->hsync_end, adj->htotal);
+
+ adj_hfp = hfp;
+ adj_hsync = hsync;
+ adj_hbp = hbp;
+ adj_hblanking = hblanking;
+
+ /* HFP needs to be even */
+ if (hfp & 0x1) {
+ adj_hfp += 1;
+ adj_hblanking += 1;
+ }
+
+ /* HBP needs to be even */
+ if (hbp & 0x1) {
+ adj_hbp -= 1;
+ adj_hblanking -= 1;
+ }
+
+ /* HSYNC needs to be even */
+ if (hsync & 0x1) {
+ if (adj_hblanking < hblanking)
+ adj_hsync += 1;
+ else
+ adj_hsync -= 1;
+ }
+
+ /*
+ * Once an illegal timing is detected, use the default HFP, HSYNC and HBP.
+ * This adjustment is made for built-in eDP panels; for an external DP
+ * monitor it may be necessary to return false instead.
+ */
+ if (hblanking < HBLANKING_MIN || (hfp < HP_MIN && hbp < HP_MIN)) {
+ adj_hsync = SYNC_LEN_DEF;
+ adj_hfp = HFP_HBP_DEF;
+ adj_hbp = HFP_HBP_DEF;
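+ /* adj->clock is in kHz, so vref approximates the refresh rate in Hz. */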
+ vref = adj->clock * 1000 / (adj->htotal * adj->vtotal);
+ if (hblanking < HBLANKING_MIN) {
+ delta_adj = HBLANKING_MIN - hblanking;
+ adj_clock = vref * delta_adj * adj->vtotal;
+ adj->clock += DIV_ROUND_UP(adj_clock, 1000);
+ } else {
+ delta_adj = hblanking - HBLANKING_MIN;
+ adj_clock = vref * delta_adj * adj->vtotal;
+ adj->clock -= DIV_ROUND_UP(adj_clock, 1000);
+ }
+
+ DRM_WARN("illegal hblanking timing, use default.\n");
+ DRM_WARN("hfp(%d), hbp(%d), hsync(%d).\n", hfp, hbp, hsync);
+ } else if (adj_hfp < HP_MIN) {
+ /* Adjust hfp if hfp less than HP_MIN */
+ delta_adj = HP_MIN - adj_hfp;
+ adj_hfp = HP_MIN;
+
+ /*
+ * Balance the total HBlanking pixels: if HBP does not have enough
+ * space, adjust the HSYNC length, otherwise adjust HBP.
+ */
+ if ((adj_hbp - delta_adj) < HP_MIN)
+ /* HBP not enough space */
+ adj_hsync -= delta_adj;
+ else
+ adj_hbp -= delta_adj;
+ } else if (adj_hbp < HP_MIN) {
+ delta_adj = HP_MIN - adj_hbp;
+ adj_hbp = HP_MIN;
+
+ /*
+ * Balance the total HBlanking pixels: if HFP does not have enough
+ * space, adjust the HSYNC length, otherwise adjust HFP.
+ */
+ if ((adj_hfp - delta_adj) < HP_MIN)
+ /* HFP not enough space */
+ adj_hsync -= delta_adj;
+ else
+ adj_hfp -= delta_adj;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "after mode fixup\n");
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+ adj_hsync, adj_hfp, adj_hbp, adj->clock);
+
+ /* Reconstruct timing */
+ adj->hsync_start = adj->hdisplay + adj_hfp;
+ adj->hsync_end = adj->hsync_start + adj_hsync;
+ adj->htotal = adj->hsync_end + adj_hbp;
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+ adj->hsync_start, adj->hsync_end, adj->htotal);
+
+ return true;
+}
+
+static void anx7625_bridge_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
+
+ anx7625_low_power_mode_check(ctx, 1);
+
+ if (WARN_ON(!atomic_read(&ctx->power_status)))
+ return;
+
+ anx7625_dp_start(ctx);
+}
+
+static void anx7625_bridge_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ if (WARN_ON(!atomic_read(&ctx->power_status)))
+ return;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
+
+ anx7625_dp_stop(ctx);
+
+ anx7625_low_power_mode_check(ctx, 0);
+}
+
+static enum drm_connector_status
+anx7625_bridge_detect(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+
+ return anx7625_sink_detect(ctx);
+}
+
+static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
+
+ return anx7625_get_edid(ctx);
+}
+
+static const struct drm_bridge_funcs anx7625_bridge_funcs = {
+ .attach = anx7625_bridge_attach,
+ .detach = anx7625_bridge_detach,
+ .disable = anx7625_bridge_disable,
+ .mode_valid = anx7625_bridge_mode_valid,
+ .mode_set = anx7625_bridge_mode_set,
+ .mode_fixup = anx7625_bridge_mode_fixup,
+ .enable = anx7625_bridge_enable,
+ .detect = anx7625_bridge_detect,
+ .get_edid = anx7625_bridge_get_edid,
+};
+
+static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
+ struct i2c_client *client)
+{
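+ /* The *_ADDR defines are 8-bit style addresses; shift down to the 7-bit form the I2C core expects. */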
+ ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
+ TX_P0_ADDR >> 1);
+ if (!ctx->i2c.tx_p0_client)
+ return -ENOMEM;
+
+ ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
+ TX_P1_ADDR >> 1);
+ if (!ctx->i2c.tx_p1_client)
+ goto free_tx_p0;
+
+ ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
+ TX_P2_ADDR >> 1);
+ if (!ctx->i2c.tx_p2_client)
+ goto free_tx_p1;
+
+ ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
+ RX_P0_ADDR >> 1);
+ if (!ctx->i2c.rx_p0_client)
+ goto free_tx_p2;
+
+ ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
+ RX_P1_ADDR >> 1);
+ if (!ctx->i2c.rx_p1_client)
+ goto free_rx_p0;
+
+ ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
+ RX_P2_ADDR >> 1);
+ if (!ctx->i2c.rx_p2_client)
+ goto free_rx_p1;
+
+ ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
+ TCPC_INTERFACE_ADDR >> 1);
+ if (!ctx->i2c.tcpc_client)
+ goto free_rx_p2;
+
+ return 0;
+
+free_rx_p2:
+ i2c_unregister_device(ctx->i2c.rx_p2_client);
+free_rx_p1:
+ i2c_unregister_device(ctx->i2c.rx_p1_client);
+free_rx_p0:
+ i2c_unregister_device(ctx->i2c.rx_p0_client);
+free_tx_p2:
+ i2c_unregister_device(ctx->i2c.tx_p2_client);
+free_tx_p1:
+ i2c_unregister_device(ctx->i2c.tx_p1_client);
+free_tx_p0:
+ i2c_unregister_device(ctx->i2c.tx_p0_client);
+
+ return -ENOMEM;
+}
+
+static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
+{
+ i2c_unregister_device(ctx->i2c.tx_p0_client);
+ i2c_unregister_device(ctx->i2c.tx_p1_client);
+ i2c_unregister_device(ctx->i2c.tx_p2_client);
+ i2c_unregister_device(ctx->i2c.rx_p0_client);
+ i2c_unregister_device(ctx->i2c.rx_p1_client);
+ i2c_unregister_device(ctx->i2c.rx_p2_client);
+ i2c_unregister_device(ctx->i2c.tcpc_client);
+}
+
+static int anx7625_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx7625_data *platform;
+ struct anx7625_platform_data *pdata;
+ int ret = 0;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ DRM_DEV_ERROR(dev, "anx7625's i2c bus doesn't support block transfers\n");
+ return -ENODEV;
+ }
+
+ platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+ if (!platform) {
+ DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ pdata = &platform->pdata;
+
+ ret = anx7625_parse_dt(dev, pdata);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+ goto free_platform;
+ }
+
+ platform->client = client;
+ i2c_set_clientdata(client, platform);
+
+ anx7625_init_gpio(platform);
+
+ atomic_set(&platform->power_status, 0);
+
+ mutex_init(&platform->lock);
+
+ platform->pdata.intp_irq = client->irq;
+ if (platform->pdata.intp_irq) {
+ INIT_WORK(&platform->work, anx7625_work_func);
+ platform->workqueue = create_workqueue("anx7625_work");
+ if (!platform->workqueue) {
+ DRM_DEV_ERROR(dev, "fail to create work queue\n");
+ ret = -ENOMEM;
+ goto free_platform;
+ }
+
+ ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
+ NULL, anx7625_intr_hpd_isr,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "anx7625-intp", platform);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "fail to request irq\n");
+ goto free_wq;
+ }
+ }
+
+ if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
+ goto free_wq;
+ }
+
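+ /* Without both power and reset GPIOs the chip is treated as always powered on. */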
+ if (platform->pdata.low_power_mode == 0) {
+ anx7625_disable_pd_protocol(platform);
+ atomic_set(&platform->power_status, 1);
+ }
+
+ /* Add work function */
+ if (platform->pdata.intp_irq)
+ queue_work(platform->workqueue, &platform->work);
+
+ platform->bridge.funcs = &anx7625_bridge_funcs;
+ platform->bridge.of_node = client->dev.of_node;
+ platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ platform->bridge.type = DRM_MODE_CONNECTOR_eDP;
+ drm_bridge_add(&platform->bridge);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "probe done\n");
+
+ return 0;
+
+free_wq:
+ if (platform->workqueue)
+ destroy_workqueue(platform->workqueue);
+
+free_platform:
+ kfree(platform);
+
+ return ret;
+}
+
+static int anx7625_i2c_remove(struct i2c_client *client)
+{
+ struct anx7625_data *platform = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&platform->bridge);
+
+ if (platform->pdata.intp_irq)
+ destroy_workqueue(platform->workqueue);
+
+ anx7625_unregister_i2c_dummy_clients(platform);
+
+ kfree(platform);
+ return 0;
+}
+
+static const struct i2c_device_id anx7625_id[] = {
+ {"anx7625", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, anx7625_id);
+
+static const struct of_device_id anx_match_table[] = {
+ {.compatible = "analogix,anx7625",},
+ {},
+};
+
+static struct i2c_driver anx7625_driver = {
+ .driver = {
+ .name = "anx7625",
+ .of_match_table = anx_match_table,
+ },
+ .probe = anx7625_i2c_probe,
+ .remove = anx7625_i2c_remove,
+
+ .id_table = anx7625_id,
+};
+
+module_i2c_driver(anx7625_driver);
+
+MODULE_DESCRIPTION("MIPI2DP anx7625 driver");
+MODULE_AUTHOR("Xin Ji <xji@analogixsemi.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ANX7625_DRV_VERSION);
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+
+#ifndef __ANX7625_H__
+#define __ANX7625_H__
+
+#define ANX7625_DRV_VERSION "0.1.04"
+
+/* Loading OCM re-trying times */
+#define OCM_LOADING_TIME 10
+
+/********* ANX7625 Register **********/
+#define TX_P0_ADDR 0x70
+#define TX_P1_ADDR 0x7A
+#define TX_P2_ADDR 0x72
+
+#define RX_P0_ADDR 0x7e
+#define RX_P1_ADDR 0x84
+#define RX_P2_ADDR 0x54
+
+#define RSVD_00_ADDR 0x00
+#define RSVD_D1_ADDR 0xD1
+#define RSVD_60_ADDR 0x60
+#define RSVD_39_ADDR 0x39
+#define RSVD_7F_ADDR 0x7F
+
+#define TCPC_INTERFACE_ADDR 0x58
+
+/* Clock frequency in Hz */
+#define XTAL_FRQ (27 * 1000000)
+
+#define POST_DIVIDER_MIN 1
+#define POST_DIVIDER_MAX 16
+#define PLL_OUT_FREQ_MIN 520000000UL
+#define PLL_OUT_FREQ_MAX 730000000UL
+#define PLL_OUT_FREQ_ABS_MIN 300000000UL
+#define PLL_OUT_FREQ_ABS_MAX 800000000UL
+#define MAX_UNSIGNED_24BIT 16777215UL
+
+/***************************************************************/
+/* Register definition of device address 0x58 */
+
+#define PRODUCT_ID_L 0x02
+#define PRODUCT_ID_H 0x03
+
+#define INTR_ALERT_1 0xCC
+#define INTR_SOFTWARE_INT BIT(3)
+#define INTR_RECEIVED_MSG BIT(5)
+
+#define SYSTEM_STSTUS 0x45
+#define INTERFACE_CHANGE_INT 0x44
+#define HPD_STATUS_CHANGE 0x80
+#define HPD_STATUS 0x80
+
+/******** END of I2C Address 0x58 ********/
+
+/***************************************************************/
+/* Register definition of device address 0x70 */
+#define I2C_ADDR_70_DPTX 0x70
+
+#define SP_TX_LINK_BW_SET_REG 0xA0
+#define SP_TX_LANE_COUNT_SET_REG 0xA1
+
+#define M_VID_0 0xC0
+#define M_VID_1 0xC1
+#define M_VID_2 0xC2
+#define N_VID_0 0xC3
+#define N_VID_1 0xC4
+#define N_VID_2 0xC5
+
+/***************************************************************/
+/* Register definition of device address 0x72 */
+#define AUX_RST 0x04
+#define RST_CTRL2 0x07
+
+#define SP_TX_TOTAL_LINE_STA_L 0x24
+#define SP_TX_TOTAL_LINE_STA_H 0x25
+#define SP_TX_ACT_LINE_STA_L 0x26
+#define SP_TX_ACT_LINE_STA_H 0x27
+#define SP_TX_V_F_PORCH_STA 0x28
+#define SP_TX_V_SYNC_STA 0x29
+#define SP_TX_V_B_PORCH_STA 0x2A
+#define SP_TX_TOTAL_PIXEL_STA_L 0x2B
+#define SP_TX_TOTAL_PIXEL_STA_H 0x2C
+#define SP_TX_ACT_PIXEL_STA_L 0x2D
+#define SP_TX_ACT_PIXEL_STA_H 0x2E
+#define SP_TX_H_F_PORCH_STA_L 0x2F
+#define SP_TX_H_F_PORCH_STA_H 0x30
+#define SP_TX_H_SYNC_STA_L 0x31
+#define SP_TX_H_SYNC_STA_H 0x32
+#define SP_TX_H_B_PORCH_STA_L 0x33
+#define SP_TX_H_B_PORCH_STA_H 0x34
+
+#define SP_TX_VID_CTRL 0x84
+#define SP_TX_BPC_MASK 0xE0
+#define SP_TX_BPC_6 0x00
+#define SP_TX_BPC_8 0x20
+#define SP_TX_BPC_10 0x40
+#define SP_TX_BPC_12 0x60
+
+#define VIDEO_BIT_MATRIX_12 0x4c
+
+#define AUDIO_CHANNEL_STATUS_1 0xd0
+#define AUDIO_CHANNEL_STATUS_2 0xd1
+#define AUDIO_CHANNEL_STATUS_3 0xd2
+#define AUDIO_CHANNEL_STATUS_4 0xd3
+#define AUDIO_CHANNEL_STATUS_5 0xd4
+#define AUDIO_CHANNEL_STATUS_6 0xd5
+#define TDM_SLAVE_MODE 0x10
+#define I2S_SLAVE_MODE 0x08
+
+#define AUDIO_CONTROL_REGISTER 0xe6
+#define TDM_TIMING_MODE 0x08
+
+#define I2C_ADDR_72_DPTX 0x72
+
+#define HP_MIN 8
+#define HBLANKING_MIN 80
+#define SYNC_LEN_DEF 32
+#define HFP_HBP_DEF ((HBLANKING_MIN - SYNC_LEN_DEF) / 2)
+#define VIDEO_CONTROL_0 0x08
+
+#define ACTIVE_LINES_L 0x14
+#define ACTIVE_LINES_H 0x15 /* Bit[7:6] are reserved */
+#define VERTICAL_FRONT_PORCH 0x16
+#define VERTICAL_SYNC_WIDTH 0x17
+#define VERTICAL_BACK_PORCH 0x18
+
+#define HORIZONTAL_TOTAL_PIXELS_L 0x19
+#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* Bit[7:6] are reserved */
+#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B
+#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* Bit[7:6] are reserved */
+#define HORIZONTAL_FRONT_PORCH_L 0x1D
+#define HORIZONTAL_FRONT_PORCH_H 0x1E /* Bit[7:4] are reserved */
+#define HORIZONTAL_SYNC_WIDTH_L 0x1F
+#define HORIZONTAL_SYNC_WIDTH_H 0x20 /* Bit[7:4] are reserved */
+#define HORIZONTAL_BACK_PORCH_L 0x21
+#define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */
+
+/******** END of I2C Address 0x72 *********/
+/***************************************************************/
+/* Register definition of device address 0x7e */
+
+#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E
+
+#define FLASH_LOAD_STA 0x05
+#define FLASH_LOAD_STA_CHK BIT(7)
+
+#define XTAL_FRQ_SEL 0x3F
+/* bit field positions */
+#define XTAL_FRQ_SEL_POS 5
+/* bit field values */
+#define XTAL_FRQ_19M2 (0 << XTAL_FRQ_SEL_POS)
+#define XTAL_FRQ_27M (4 << XTAL_FRQ_SEL_POS)
+
+#define R_DSC_CTRL_0 0x40
+#define READ_STATUS_EN 7
+#define CLK_1MEG_RB 6 /* 1MHz clock reset; 0=reset, 1=reset release */
+#define DSC_BIST_DONE 1 /* Bit[5:1]: 1=DSC MBIST pass */
+#define DSC_EN 0x01 /* 1=DSC enabled, 0=DSC disabled */
+
+#define OCM_FW_VERSION 0x31
+#define OCM_FW_REVERSION 0x32
+
+#define AP_AUX_ADDR_7_0 0x11
+#define AP_AUX_ADDR_15_8 0x12
+#define AP_AUX_ADDR_19_16 0x13
+
+/* Bit[0:3] AUX status, bit 4 op_en, bit 5 address only */
+#define AP_AUX_CTRL_STATUS 0x14
+#define AP_AUX_CTRL_OP_EN 0x10
+#define AP_AUX_CTRL_ADDRONLY 0x20
+
+#define AP_AUX_BUFF_START 0x15
+#define PIXEL_CLOCK_L 0x25
+#define PIXEL_CLOCK_H 0x26
+
+#define AP_AUX_COMMAND 0x27 /* com+len */
+/* Bit 0&1: 3D video structure */
+/* 0x01: frame packing, 0x02:Line alternative, 0x03:Side-by-side(full) */
+#define AP_AV_STATUS 0x28
+#define AP_VIDEO_CHG BIT(2)
+#define AP_AUDIO_CHG BIT(3)
+#define AP_MIPI_MUTE BIT(4) /* 1: MIPI input mute, 0: unmute */
+#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input on, 0: no RX input */
+#define AP_DISABLE_PD BIT(6)
+#define AP_DISABLE_DISPLAY BIT(7)
+/***************************************************************/
+/* Register definition of device address 0x84 */
+#define MIPI_PHY_CONTROL_3 0x03
+#define MIPI_HS_PWD_CLK 7
+#define MIPI_HS_RT_CLK 6
+#define MIPI_PD_CLK 5
+#define MIPI_CLK_RT_MANUAL_PD_EN 4
+#define MIPI_CLK_HS_MANUAL_PD_EN 3
+#define MIPI_CLK_DET_DET_BYPASS 2
+#define MIPI_CLK_MISS_CTRL 1
+#define MIPI_PD_LPTX_CH_MANUAL_PD_EN 0
+
+#define MIPI_LANE_CTRL_0 0x05
+#define MIPI_TIME_HS_PRPR 0x08
+
+/*
+ * After the MIPI RX protocol layer has received this many video frames,
+ * the protocol layer starts to reconstruct the video stream from the PHY.
+ */
+#define MIPI_VIDEO_STABLE_CNT 0x0A
+
+#define MIPI_LANE_CTRL_10 0x0F
+#define MIPI_DIGITAL_ADJ_1 0x1B
+
+#define MIPI_PLL_M_NUM_23_16 0x1E
+#define MIPI_PLL_M_NUM_15_8 0x1F
+#define MIPI_PLL_M_NUM_7_0 0x20
+#define MIPI_PLL_N_NUM_23_16 0x21
+#define MIPI_PLL_N_NUM_15_8 0x22
+#define MIPI_PLL_N_NUM_7_0 0x23
+
+#define MIPI_DIGITAL_PLL_6 0x2A
+/* Bit[7:6]: VCO band control, only effective */
+#define MIPI_M_NUM_READY 0x10
+#define MIPI_N_NUM_READY 0x08
+#define STABLE_INTEGER_CNT_EN 0x04
+#define MIPI_PLL_TEST_BIT 0
+/* Bit[1:0]: test point output select - */
+/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */
+
+#define MIPI_DIGITAL_PLL_7 0x2B
+#define MIPI_PLL_FORCE_N_EN 7
+#define MIPI_PLL_FORCE_BAND_EN 6
+
+#define MIPI_PLL_VCO_TUNE_REG 4
+/* Bit[5:4]: VCO metal capacitance - */
+/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */
+#define MIPI_PLL_VCO_TUNE_REG_VAL 0x30
+
+#define MIPI_PLL_PLL_LDO_BIT 2
+/* Bit[3:2]: vco_v2i power - */
+/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */
+#define MIPI_PLL_RESET_N 0x02
+#define MIPI_FRQ_FORCE_NDET 0
+
+#define MIPI_ALERT_CLR_0 0x2D
+#define HS_link_error_clear 7
+/* This bit itself is S/C, and it clears 0x84:0x31[7] */
+
+#define MIPI_ALERT_OUT_0 0x31
+#define check_sum_err_hs_sync 7
+/* This bit is cleared by 0x84:0x2D[7] */
+
+#define MIPI_DIGITAL_PLL_8 0x33
+#define MIPI_POST_DIV_VAL 4
+/* N means divided by (n+1), n = 0~15 */
+#define MIPI_EN_LOCK_FRZ 3
+#define MIPI_FRQ_COUNTER_RST 2
+#define MIPI_FRQ_SET_REG_8 1
+/* Bit 0 is reserved */
+
+#define MIPI_DIGITAL_PLL_9 0x34
+
+#define MIPI_DIGITAL_PLL_16 0x3B
+#define MIPI_FRQ_FREEZE_NDET 7
+#define MIPI_FRQ_REG_SET_ENABLE 6
+#define MIPI_REG_FORCE_SEL_EN 5
+#define MIPI_REG_SEL_DIV_REG 4
+#define MIPI_REG_FORCE_PRE_DIV_EN 3
+/* Bit 2 is reserved */
+#define MIPI_FREF_D_IND 1
+#define REF_CLK_27000KHZ 1
+#define REF_CLK_19200KHZ 0
+#define MIPI_REG_PLL_PLL_TEST_ENABLE 0
+
+#define MIPI_DIGITAL_PLL_18 0x3D
+#define FRQ_COUNT_RB_SEL 7
+#define REG_FORCE_POST_DIV_EN 6
+#define MIPI_DPI_SELECT 5
+#define SELECT_DSI 1
+#define SELECT_DPI 0
+#define REG_BAUD_DIV_RATIO 0
+
+#define H_BLANK_L 0x3E
+/* For DSC only */
+#define H_BLANK_H 0x3F
+/* For DSC only; note: bit[7:6] are reserved */
+#define MIPI_SWAP 0x4A
+#define MIPI_SWAP_CH0 7
+#define MIPI_SWAP_CH1 6
+#define MIPI_SWAP_CH2 5
+#define MIPI_SWAP_CH3 4
+#define MIPI_SWAP_CLK 3
+/* Bit[2:0] are reserved */
+
+/******** END of I2C Address 0x84 *********/
+
+/* DPCD regs */
+#define DPCD_DPCD_REV 0x00
+#define DPCD_MAX_LINK_RATE 0x01
+#define DPCD_MAX_LANE_COUNT 0x02
+
+/********* ANX7625 Register End **********/
+
+/***************** Display *****************/
+enum audio_fs {
+ AUDIO_FS_441K = 0x00,
+ AUDIO_FS_48K = 0x02,
+ AUDIO_FS_32K = 0x03,
+ AUDIO_FS_882K = 0x08,
+ AUDIO_FS_96K = 0x0a,
+ AUDIO_FS_1764K = 0x0c,
+ AUDIO_FS_192K = 0x0e
+};
+
+enum audio_wd_len {
+ AUDIO_W_LEN_16_20MAX = 0x02,
+ AUDIO_W_LEN_18_20MAX = 0x04,
+ AUDIO_W_LEN_17_20MAX = 0x0c,
+ AUDIO_W_LEN_19_20MAX = 0x08,
+ AUDIO_W_LEN_20_20MAX = 0x0a,
+ AUDIO_W_LEN_20_24MAX = 0x03,
+ AUDIO_W_LEN_22_24MAX = 0x05,
+ AUDIO_W_LEN_21_24MAX = 0x0d,
+ AUDIO_W_LEN_23_24MAX = 0x09,
+ AUDIO_W_LEN_24_24MAX = 0x0b
+};
+
+#define I2S_CH_2 0x01
+#define TDM_CH_4 0x03
+#define TDM_CH_6 0x05
+#define TDM_CH_8 0x07
+
+#define MAX_DPCD_BUFFER_SIZE 16
+
+#define ONE_BLOCK_SIZE 128
+#define FOUR_BLOCK_SIZE (128 * 4)
+
+#define MAX_EDID_BLOCK 3
+#define EDID_TRY_CNT 3
+#define SUPPORT_PIXEL_CLOCK 300000
+
+struct s_edid_data {
+ int edid_block_num;
+ u8 edid_raw_data[FOUR_BLOCK_SIZE];
+};
+
+/***************** Display End *****************/
+
+struct anx7625_platform_data {
+ struct gpio_desc *gpio_p_on;
+ struct gpio_desc *gpio_reset;
+ struct drm_bridge *panel_bridge;
+ int intp_irq;
+ u32 low_power_mode;
+ struct device_node *mipi_host_node;
+};
+
+struct anx7625_i2c_client {
+ struct i2c_client *tx_p0_client;
+ struct i2c_client *tx_p1_client;
+ struct i2c_client *tx_p2_client;
+ struct i2c_client *rx_p0_client;
+ struct i2c_client *rx_p1_client;
+ struct i2c_client *rx_p2_client;
+ struct i2c_client *tcpc_client;
+};
+
+struct anx7625_data {
+ struct anx7625_platform_data pdata;
+ atomic_t power_status;
+ int hpd_status;
+ int hpd_high_cnt;
+ /* Lock for work queue */
+ struct mutex lock;
+ struct i2c_client *client;
+ struct anx7625_i2c_client i2c;
+ struct i2c_client *last_client;
+ struct s_edid_data slimport_edid_p;
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ char edid_block;
+ struct display_timing dt;
+ u8 display_timing_valid;
+ struct drm_bridge bridge;
+ u8 bridge_attached;
+ struct mipi_dsi_device *dsi;
+};
+
+#endif /* __ANX7625_H__ */
return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
}
-static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+static const struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
struct tc358764 {
struct device *dev;
struct drm_bridge bridge;
+ struct drm_connector connector;
struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
struct gpio_desc *gpio_reset;
- struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
int error;
};
return container_of(bridge, struct tc358764, bridge);
}
+static inline
+struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
+{
+ return container_of(connector, struct tc358764, connector);
+}
+
static int tc358764_init(struct tc358764 *ctx)
{
u32 v = 0;
usleep_range(1000, 2000);
}
+static int tc358764_get_modes(struct drm_connector *connector)
+{
+ struct tc358764 *ctx = connector_to_tc358764(connector);
+
+ return drm_panel_get_modes(ctx->panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
+ .get_modes = tc358764_get_modes,
+};
+
+static const struct drm_connector_funcs tc358764_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void tc358764_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
+}
+
static void tc358764_post_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret;
+ ret = drm_panel_unprepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
tc358764_reset(ctx);
usleep_range(10000, 15000);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
ret = tc358764_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+ ret = drm_panel_prepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
+}
+
+static void tc358764_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_enable(ctx->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
static int tc358764_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(drm, &ctx->connector,
+ &tc358764_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&ctx->connector,
+ &tc358764_connector_helper_funcs);
+ drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+ ctx->connector.funcs->reset(&ctx->connector);
+ drm_connector_register(&ctx->connector);
+
+ return 0;
+}
+
+static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
- bridge, flags);
+ drm_connector_unregister(&ctx->connector);
+ ctx->panel = NULL;
+ drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
+ .disable = tc358764_disable,
.post_disable = tc358764_post_disable,
+ .enable = tc358764_enable,
.pre_enable = tc358764_pre_enable,
.attach = tc358764_attach,
+ .detach = tc358764_detach,
};
static int tc358764_parse_dt(struct tc358764 *ctx)
{
- struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
- struct drm_panel *panel;
int ret;
ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
return PTR_ERR(ctx->gpio_reset);
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret)
- return ret;
-
- panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
+ NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot find panel (%d)\n", ret);
- ctx->panel_bridge = panel_bridge;
- return 0;
+ return ret;
}
static int tc358764_configure_regulators(struct tc358764 *ctx)
return ret;
ctx->bridge.funcs = &tc358764_bridge_funcs;
- ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
#define SN_NUM_GPIOS 4
#define SN_GPIO_PHYSICAL_OFFSET 1
+#define SN_LINK_TRAINING_TRIES 10
+
/**
* struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
* @dev: Pointer to our device.
{
unsigned int val;
int ret;
+ int i;
/* set dp clk frequency value */
regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
goto exit;
}
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
- if (ret) {
- *last_err_str = "Training complete polling failed";
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- *last_err_str = "Link training failed, link is off";
- ret = -EIO;
+ /*
+ * We'll try to link train several times. As part of link training
+ * the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER. If
+	 * the panel isn't quite ready, it might respond with a NAK here, which
+	 * means we need to try again.
+ */
+ for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) {
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
+ continue;
+ }
+
+ break;
}
+ /* If we saw quite a few retries, add a note about it */
+ if (!ret && i > SN_LINK_TRAINING_TRIES / 2)
+ DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i);
+
exit:
/* Disable the PLL if we failed */
if (ret)
{
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- if (pdata->refclk)
- clk_disable_unprepare(pdata->refclk);
+ clk_disable_unprepare(pdata->refclk);
pm_runtime_put_sync(pdata->dev);
}
* needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
+ * WARNING: Drivers may only add new CRTC states to a @state if
+ * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
+ * not created by userspace through an IOCTL call.
+ *
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
+ unsigned requested_crtc = 0;
+ unsigned affected_crtc = 0;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ requested_crtc |= drm_crtc_mask(crtc);
+
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
}
}
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ affected_crtc |= drm_crtc_mask(crtc);
+
+ /*
+ * For commits that allow modesets drivers can add other CRTCs to the
+ * atomic commit, e.g. when they need to reallocate global resources.
+ * This can cause spurious EBUSY, which robs compositors of a very
+	 * effective sanity check for their drawing loop. Therefore only allow
+ * drivers to add unrelated CRTC states for modeset commits.
+ *
+ * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
+ * so compositors know what's going on.
+ */
+ if (affected_crtc != requested_crtc) {
+ DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
+ requested_crtc, affected_crtc);
+ WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
+ requested_crtc, affected_crtc);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
* to dmesg in case of error irq's. (Hint, you probably want to
* ratelimit this!)
*
- * The caller must drm_modeset_lock_all(), or if this is called
- * from error irq handler, it should not be enabled by default.
- * (Ie. if you are debugging errors you might not care that this
- * is racey. But calling this without all modeset locks held is
- * not inherently safe.)
+ * The caller must wrap this in drm_modeset_lock_all_ctx() and
+ * drm_modeset_drop_locks(). If this is called from an error irq handler, it
+ * should not be enabled by default - if you are debugging errors you might
+ * not care that this is racy, but calling this without all modeset locks held
+ * is inherently unsafe.
*/
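As a worked illustration of the locking rule above, here is a minimal sketch (not part of the patch; example_dump_state is a hypothetical caller, and it assumes <drm/drm_atomic.h>, <drm/drm_modeset_lock.h> and <drm/drm_print.h> are included) of how a debugfs-style caller might take and drop the modeset locks around drm_state_dump():

static void example_dump_state(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_printer p = drm_info_printer(dev->dev);
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* back off and retry the whole acquisition on deadlock */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret)
		drm_state_dump(dev, &p);	/* all modeset locks held here */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}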
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_crtc_state);
+ funcs->atomic_disable(crtc, old_state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_crtc_state);
+ funcs->atomic_enable(crtc, old_state);
else if (funcs->commit)
funcs->commit(crtc);
}
* overridden by a previous synchronous update's state.
*/
if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
+ plane->base.id, plane->name);
return -EBUSY;
+ }
return funcs->atomic_async_check(plane, new_plane_state);
}
* commit with nonblocking ones. */
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
+ crtc->base.id, crtc->name);
+
return -EBUSY;
}
} else if (i == 1) {
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_conn_state->commit &&
- !try_wait_for_completion(&old_conn_state->commit->flip_done))
+ !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
+ conn->base.id, conn->name);
+
return -EBUSY;
+ }
/* Always track connectors explicitly for e.g. link retraining. */
commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->flip_done))
+ !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
+ plane->base.id, plane->name);
+
return -EBUSY;
+ }
/* Always track planes explicitly for async pageflip support. */
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
* drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
+ *
+ * User-space should not parse the EDID to obtain information exposed via
+ * other KMS properties (because the kernel might apply limits, quirks or
+ * fixups to the EDID). For instance, user-space should not try to parse
+ * mode lists from the EDID.
* DPMS:
* Legacy property for setting the power state of the connector. For atomic
* drivers this is only provided for backwards compatibility with existing
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
- if (!kref_get_unless_zero(&aux_dev->refcount))
+ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
WARN_ON(mgr->mst_primary);
/* get dpcd info */
- ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (ret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("failed to read DPCD\n");
+ ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
+ mgr->aux->name, ret);
goto out_unlock;
}
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
- drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
+ drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
for (i = 0; i < num_blocks; i++) {
u8 *block = edid + i * EDID_LENGTH;
char prefix[20];
else
sprintf(prefix, "\t[%02x] GOOD ", i);
- print_hex_dump(KERN_WARNING,
+ print_hex_dump(KERN_DEBUG,
prefix, DUMP_PREFIX_NONE, 16, 1,
block, EDID_LENGTH, false);
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
-/*
- * restore fbcon display for all kms driver's using this helper, used for sysrq
- * and panic handling.
- */
-static bool drm_fb_helper_force_kernel_mode(void)
+/* emergency restore, don't bother with error reporting */
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- bool ret, error = false;
struct drm_fb_helper *helper;
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
+ mutex_lock(&kernel_fb_helper_lock);
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
struct drm_device *dev = helper->dev;
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_locked(&helper->client);
- if (ret)
- error = true;
+ drm_client_modeset_commit_locked(&helper->client);
mutex_unlock(&helper->lock);
}
- return error;
+ mutex_unlock(&kernel_fb_helper_lock);
}
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
- bool ret;
-
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
-}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(int dummy1)
{ .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
- struct drm_device *dev = obj->dev;
- if (obj->funcs && obj->funcs->close)
+ if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- else if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
if (ret)
goto err_remove;
- if (obj->funcs && obj->funcs->open) {
+ if (obj->funcs->open) {
ret = obj->funcs->open(obj, file_priv);
if (ret)
goto err_revoke;
- } else if (dev->driver->gem_open_object) {
- ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret)
- goto err_revoke;
}
*handlep = handle;
{
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
- struct drm_device *dev = obj->dev;
- if (obj->funcs)
- obj->funcs->free(obj);
- else if (dev->driver->gem_free_object_unlocked)
- dev->driver->gem_free_object_unlocked(obj);
+ if (WARN_ON(!obj->funcs->free))
+ return;
+
+ obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
- * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
- * provided by the driver. Depending on their requirements, drivers can either
- * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * Set up the VMA to prepare mapping of the GEM object using the GEM object's
+ * vm_ops. Depending on their requirements, GEM objects can either
+ * provide a fault handler in their vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
* callers must verify access restrictions before calling this helper.
*
* Returns 0 on success or -EINVAL if the object size is smaller than the VMA
- * size, or if no gem_vm_ops are provided.
+ * size, or if no vm_ops are provided.
*/
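For context, a hedged sketch of the fault-handler variant described above (every foo_* name is hypothetical; the real drivers further down, e.g. etnaviv and gma500, follow the same shape):

static vm_fault_t foo_gem_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vmf->vma->vm_private_data;

	/*
	 * Look up the page backing vmf->address in obj and insert it into the
	 * VMA; returning VM_FAULT_SIGBUS here is only a placeholder.
	 */
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = foo_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs foo_gem_funcs = {
	/* .free, .get_sg_table, ... elided */
	.vm_ops = &foo_gem_vm_ops,
};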
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
- struct drm_device *dev = obj->dev;
int ret;
/* Check for valid size. */
vma->vm_private_data = obj;
- if (obj->funcs && obj->funcs->mmap) {
+ if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
drm_gem_object_put(obj);
}
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
- if (obj->funcs && obj->funcs->vm_ops)
+ if (obj->funcs->vm_ops)
vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
else {
drm_gem_object_put(obj);
return -EINVAL;
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->funcs && obj->funcs->print_info)
+ if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
{
- if (obj->funcs && obj->funcs->pin)
+ if (obj->funcs->pin)
return obj->funcs->pin(obj);
- else if (obj->dev->driver->gem_prime_pin)
- return obj->dev->driver->gem_prime_pin(obj);
else
return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
{
- if (obj->funcs && obj->funcs->unpin)
+ if (obj->funcs->unpin)
obj->funcs->unpin(obj);
- else if (obj->dev->driver->gem_prime_unpin)
- obj->dev->driver->gem_prime_unpin(obj);
}
void *drm_gem_vmap(struct drm_gem_object *obj)
{
void *vaddr;
- if (obj->funcs && obj->funcs->vmap)
+ if (obj->funcs->vmap)
vaddr = obj->funcs->vmap(obj);
- else if (obj->dev->driver->gem_prime_vmap)
- vaddr = obj->dev->driver->gem_prime_vmap(obj);
else
vaddr = ERR_PTR(-EOPNOTSUPP);
if (!vaddr)
return;
- if (obj->funcs && obj->funcs->vunmap)
+ if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, vaddr);
- else if (obj->dev->driver->gem_prime_vunmap)
- obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
/**
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object_unlocked callback.
+ * &drm_gem_object_funcs.free callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_gem_cma_object *cma_obj;
-
- cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) {
if (cma_obj->vaddr)
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) {
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
*
* This function exports a scatter/gather table suitable for PRIME usage by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their &drm_driver.gem_prime_get_sg_table callback.
+ * set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
- * driver's &drm_driver.gem_prime_vmap callback.
+ * driver's &drm_gem_object_funcs.vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
* This function removes a buffer exported via DRM PRIME from the kernel's
* virtual address space. This is a no-op because CMA buffers cannot be
* unmapped from kernel space. Drivers using the CMA helpers should set this
- * as their &drm_driver.gem_prime_vunmap callback.
+ * as their &drm_gem_object_funcs.vunmap callback.
*/
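Collecting the callbacks named in the CMA comments above into one table gives roughly the following (an illustrative sketch only; example_cma_gem_funcs is a hypothetical name, and real drivers usually rely on the CMA helpers to install these defaults for them):

static const struct drm_gem_object_funcs example_cma_gem_funcs = {
	.free = drm_gem_cma_free_object,
	.get_sg_table = drm_gem_cma_prime_get_sg_table,
	.vmap = drm_gem_cma_prime_vmap,
	.vunmap = drm_gem_cma_prime_vunmap,
};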
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *obj;
- void *vaddr;
+ struct dma_buf_map map;
+ int ret;
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
+ ret = dma_buf_vmap(attach->dmabuf, &map);
+ if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
+ dma_buf_vunmap(attach->dmabuf, &map);
return obj;
}
cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
+ cma_obj->vaddr = map.vaddr;
return obj;
}
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- int ret;
+ struct dma_buf_map map;
+ int ret = 0;
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
if (obj->import_attach) {
- shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+ if (!ret)
+ shmem->vaddr = map.vaddr;
} else {
pgprot_t prot = PAGE_KERNEL;
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
+ if (!shmem->vaddr)
+ ret = -ENOMEM;
}
- if (!shmem->vaddr) {
- DRM_DEBUG_KMS("Failed to vmap pages\n");
- ret = -ENOMEM;
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
return;
if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf, &map);
else
vunmap(shmem->vaddr);
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
unsigned int c = 0;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
- pl_flag = TTM_PL_FLAG_TOPDOWN;
+ invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
- gbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- invariant_flags;
+ gbo->placements[c++].flags = invariant_flags;
}
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
gbo->placements[c].mem_type = TTM_PL_SYSTEM;
- gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- invariant_flags;
+ gbo->placements[c++].flags = invariant_flags;
}
gbo->placement.num_placement = c;
}
}
-/*
- * Note that on error, drm_gem_vram_init will free the buffer object.
- */
-
-static int drm_gem_vram_init(struct drm_device *dev,
- struct drm_gem_vram_object *gbo,
- size_t size, unsigned long pg_align)
-{
- struct drm_vram_mm *vmm = dev->vram_mm;
- struct ttm_bo_device *bdev;
- int ret;
- size_t acc_size;
-
- if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
- kfree(gbo);
- return -EINVAL;
- }
- bdev = &vmm->bdev;
-
- gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
-
- ret = drm_gem_object_init(dev, &gbo->bo.base, size);
- if (ret) {
- kfree(gbo);
- return ret;
- }
-
- acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
-
- gbo->bo.bdev = bdev;
- drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
- DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-
- ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, false, acc_size,
- NULL, NULL, ttm_buffer_object_destroy);
- if (ret)
- /*
- * A failing ttm_bo_init will call ttm_buffer_object_destroy
- * to release gbo->bo.base and kfree gbo.
- */
- return ret;
-
- return 0;
-}
-
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
*
+ * GEM objects are allocated by calling struct drm_driver.gem_create_object,
+ * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
+ * object functions in struct drm_driver.gem_create_object. If no functions
+ * are set, the new GEM object will use the default functions from GEM VRAM
+ * helpers.
+ *
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
+ struct drm_gem_object *gem;
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ struct ttm_bo_device *bdev;
int ret;
+ size_t acc_size;
+
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+ return ERR_PTR(-EINVAL);
if (dev->driver->gem_create_object) {
- struct drm_gem_object *gem =
- dev->driver->gem_create_object(dev, size);
+ gem = dev->driver->gem_create_object(dev, size);
if (!gem)
return ERR_PTR(-ENOMEM);
gbo = drm_gem_vram_of_gem(gem);
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
if (!gbo)
return ERR_PTR(-ENOMEM);
+ gem = &gbo->bo.base;
}
- ret = drm_gem_vram_init(dev, gbo, size, pg_align);
- if (ret < 0)
+ if (!gem->funcs)
+ gem->funcs = &drm_gem_vram_object_funcs;
+
+ ret = drm_gem_object_init(dev, gem, size);
+ if (ret) {
+ kfree(gbo);
+ return ERR_PTR(ret);
+ }
+
+ bdev = &vmm->bdev;
+ acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+
+ gbo->bo.bdev = bdev;
+ drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
+
+ /*
+ * A failing ttm_bo_init will call ttm_buffer_object_destroy
+ * to release gbo->bo.base and kfree gbo.
+ */
+ ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
+ &gbo->placement, pg_align, false, acc_size,
+ NULL, NULL, ttm_buffer_object_destroy);
+ if (ret)
return ERR_PTR(ret);
return gbo;
*/
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
- if (WARN_ON_ONCE(!gbo->pin_count))
+ if (WARN_ON_ONCE(!gbo->bo.pin_count))
return (s64)-ENODEV;
return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
- int i, ret;
struct ttm_operation_ctx ctx = { false, false };
+ int ret;
- if (gbo->pin_count)
+ if (gbo->bo.pin_count)
goto out;
if (pl_flag)
drm_gem_vram_placement(gbo, pl_flag);
- for (i = 0; i < gbo->placement.num_placement; ++i)
- gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
return ret;
out:
- ++gbo->pin_count;
+ ttm_bo_pin(&gbo->bo);
return 0;
}
}
EXPORT_SYMBOL(drm_gem_vram_pin);
-static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
-
- if (WARN_ON_ONCE(!gbo->pin_count))
- return 0;
-
- --gbo->pin_count;
- if (gbo->pin_count)
- return 0;
-
- for (i = 0; i < gbo->placement.num_placement ; ++i)
- gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
- if (ret < 0)
- return ret;
-
- return 0;
+ ttm_bo_unpin(&gbo->bo);
}
/**
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ret;
- ret = drm_gem_vram_unpin_locked(gbo);
+
+ drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
kmap->virtual = NULL;
}
+static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ int ret;
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+ ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+ if (ret) {
+ swap(*new_mem, gbo->bo.mem);
+ drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
+ swap(*new_mem, gbo->bo.mem);
+ }
+ return ret;
+}
+
/*
* Helpers for struct drm_gem_object_funcs
*/
if (!tt)
return NULL;
- ret = ttm_tt_init(tt, bo, page_flags);
+ ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
if (ret < 0)
goto err_ttm_tt_init;
drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
-static void bo_driver_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_gem_vram_object *gbo;
gbo = drm_gem_vram_of_bo(bo);
- drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+ drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
+}
+
+static int bo_driver_move(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
- .move_notify = bo_driver_move_notify,
+ .move = bo_driver_move,
+ .delete_mem_notify = bo_driver_delete_mem_notify,
.io_mem_reserve = bo_driver_io_mem_reserve,
};
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
- true);
+ false, true);
if (ret)
return ret;
#ifdef CONFIG_PCI
/* drm_pci.c */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
-static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
return -EINVAL;
}
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
}
/**
- * drm_irq_by_busid - Get interrupt from bus ID
+ * drm_legacy_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
- else if (dev->driver->gem_prime_export)
- dmabuf = dev->driver->gem_prime_export(obj, flags);
else
dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
* This is the PRIME export function which must be used mandatorily by GEM
* drivers to ensure correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
- * &drm_driver.gem_prime_export driver callback.
+ * &drm_gem_object_funcs.export callback.
*/
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
- if (obj->funcs)
- sgt = obj->funcs->get_sg_table(obj);
- else
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ if (WARN_ON(!obj->funcs->get_sg_table))
+ return ERR_PTR(-ENOSYS);
+
+ sgt = obj->funcs->get_sg_table(obj);
+ if (IS_ERR(sgt))
+ return sgt;
ret = dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC);
/**
* drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
* @dma_buf: buffer to be mapped
+ * @map: the virtual address of the buffer
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
*
* Returns 0 on success or a negative errno code otherwise.
*/
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
void *vaddr;
vaddr = drm_gem_vmap(obj);
if (IS_ERR(vaddr))
- vaddr = NULL;
+ return PTR_ERR(vaddr);
- return vaddr;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
/**
* drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
* @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
+ * @map: the virtual address of the buffer
*
* Releases a kernel virtual mapping. This can be used as the
* &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
*/
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, vaddr);
+ drm_gem_vunmap(obj, map->vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
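Putting the two callbacks together, a hedged sketch of a dma_buf_ops table that uses them with the new struct dma_buf_map signatures (example_gem_dmabuf_ops is a hypothetical name; .attach, .detach and other members are omitted for brevity):

static const struct dma_buf_ops example_gem_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};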
}
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_simple_display_pipe *pipe;
}
static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_simple_display_pipe *pipe;
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-static const struct vm_operations_struct vm_ops = {
- .fault = etnaviv_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
- .gem_free_object_unlocked = etnaviv_gem_free_object,
- .gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = etnaviv_gem_prime_pin,
- .gem_prime_unpin = etnaviv_gem_prime_unpin,
- .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
- .gem_prime_vmap = etnaviv_gem_prime_vmap,
- .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
return obj->ops->mmap(obj, vma);
}
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
+static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
mutex_unlock(&priv->gem_lock);
}
+static const struct vm_operations_struct vm_ops = {
+ .fault = etnaviv_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
+ .free = etnaviv_gem_free_object,
+ .pin = etnaviv_gem_prime_pin,
+ .unpin = etnaviv_gem_prime_unpin,
+ .get_sg_table = etnaviv_gem_prime_get_sg_table,
+ .vmap = etnaviv_gem_prime_vmap,
+ .vunmap = etnaviv_gem_prime_vunmap,
+ .vm_ops = &vm_ops,
+};
+
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
*obj = &etnaviv_obj->base;
+ (*obj)->funcs = &etnaviv_gem_object_funcs;
return 0;
}
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
+
if (etnaviv_obj->vaddr)
- dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
- etnaviv_obj->vaddr);
+ dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
+ struct dma_buf_map map;
+ int ret;
+
lockdep_assert_held(&etnaviv_obj->lock);
- return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ if (ret)
+ return NULL;
+ return map.vaddr;
}
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
}
static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
file->driver_priv = NULL;
}
-static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_RENDER_ALLOW),
.open = exynos_drm_open,
.lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
- .gem_free_object_unlocked = exynos_drm_gem_free_object,
- .gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
- .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
- .gem_prime_vmap = exynos_drm_gem_prime_vmap,
- .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
.gem_prime_mmap = exynos_drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
kfree(exynos_gem);
}
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
+ .free = exynos_drm_gem_free_object,
+ .get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .vmap = exynos_drm_gem_prime_vmap,
+ .vunmap = exynos_drm_gem_prime_vunmap,
+ .vm_ops = &exynos_drm_gem_vm_ops,
+};
+
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
exynos_gem->size = size;
obj = &exynos_gem->base;
+ obj->funcs = &exynos_drm_gem_object_funcs;
+
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
}
static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
}
static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
clock_recovery = false;
DRM_DEBUG_KMS("Start train\n");
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
+ reg = DP | DP_LINK_TRAIN_PAT_1;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
cr_tries = 0;
DRM_DEBUG_KMS("\n");
- reg = DP | DP_LINK_TRAIN_PAT_2;
+ reg = DP | DP_LINK_TRAIN_PAT_2;
for (;;) {
DRM_INFO("failed to retrieve link info, disabling eDP\n");
drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
- goto err_priv;
+ goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],
#include <drm/drm_gem_framebuffer_helper.h>
#include "framebuffer.h"
+#include "gem.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
if (backing) {
+ backing->gem.funcs = &psb_gem_object_funcs;
drm_gem_private_object_init(dev, &backing->gem, aligned_size);
return backing;
}
#include "psb_drv.h"
-void psb_gem_free_object(struct drm_gem_object *obj)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
+
+static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
return -EINVAL;
}
+static const struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+const struct drm_gem_object_funcs psb_gem_object_funcs = {
+ .free = psb_gem_free_object,
+ .vm_ops = &psb_gem_vm_ops,
+};
+
/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
}
+ r->gem.funcs = &psb_gem_object_funcs;
/* Initialize the extra goodies GEM needs to do all the hard work */
if (drm_gem_object_init(dev, &r->gem, size) != 0) {
psb_gtt_free_range(dev, r);
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-vm_fault_t psb_gem_fault(struct vm_fault *vmf)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
#ifndef _GEM_H
#define _GEM_H
+extern const struct drm_gem_object_funcs psb_gem_object_funcs;
+
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
u64 size, u32 *handlep, int stolen, u32 align);
+
#endif
.runtime_idle = psb_runtime_idle,
};
-static const struct vm_operations_struct psb_gem_vm_ops = {
- .fault = psb_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations psb_gem_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .gem_free_object_unlocked = psb_gem_free_object,
- .gem_vm_ops = &psb_gem_vm_ops,
-
.dumb_create = psb_gem_dumb_create,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
-extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
#include "hibmc_drm_regs.h"
struct hibmc_display_panel_pll {
- unsigned long M;
- unsigned long N;
- unsigned long OD;
- unsigned long POD;
+ u64 M;
+ u64 N;
+ u64 OD;
+ u64 POD;
};
struct hibmc_dislay_pll_config {
- unsigned long hdisplay;
- unsigned long vdisplay;
+ u64 hdisplay;
+ u64 vdisplay;
u32 pll1_config_value;
u32 pll2_config_value;
};
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
};
-#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
-
static int hibmc_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_plane_state *state = plane->state;
u32 reg;
s64 gpu_addr = 0;
- unsigned int line_l;
- struct hibmc_drm_private *priv = plane->dev->dev_private;
+ u32 line_l;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
if (!state->fb)
DRM_FORMAT_ABGR8888
};
-static struct drm_plane_funcs hibmc_plane_funcs = {
+static const struct drm_plane_funcs hibmc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.atomic_update = hibmc_plane_atomic_update,
};
-static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
- unsigned int reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
+ u32 reg;
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
}
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
- unsigned int reg;
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ u32 reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
- unsigned int reg;
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ u32 reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
- int i = 0;
+ size_t i = 0;
int vrefresh = drm_mode_vrefresh(mode);
if (vrefresh < 59 || vrefresh > 61)
return MODE_BAD;
}
-static unsigned int format_pll_reg(void)
+static u32 format_pll_reg(void)
{
- unsigned int pllreg = 0;
+ u32 pllreg = 0;
struct hibmc_display_panel_pll pll = {0};
/*
return pllreg;
}
-static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+static void set_vclock_hisilicon(struct drm_device *dev, u64 pll)
{
u32 val;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
val = readl(priv->mmio + CRT_PLL1_HS);
val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
writel(val, priv->mmio + CRT_PLL1_HS);
}
-static void get_pll_config(unsigned long x, unsigned long y,
- u32 *pll1, u32 *pll2)
+static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2)
{
- int i;
- int count = ARRAY_SIZE(hibmc_pll_table);
+ size_t i;
+ size_t count = ARRAY_SIZE(hibmc_pll_table);
for (i = 0; i < count; i++) {
if (hibmc_pll_table[i].hdisplay == x &&
* FPGA only supports 7 predefined pixel clocks, and clock select is
* in bit 4:0 of new register 0x802a8.
*/
-static unsigned int display_ctrl_adjust(struct drm_device *dev,
- struct drm_display_mode *mode,
- unsigned int ctrl)
+static u32 display_ctrl_adjust(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ u32 ctrl)
{
- unsigned long x, y;
+ u64 x, y;
u32 pll1; /* bit[31:0] of PLL */
u32 pll2; /* bit[63:32] of PLL */
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
x = mode->hdisplay;
y = mode->vdisplay;
static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- unsigned int val;
+ u32 val;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_device *dev = crtc->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
- int width = mode->hsync_end - mode->hsync_start;
- int height = mode->vsync_end - mode->vsync_start;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 width = mode->hsync_end - mode->hsync_start;
+ u32 height = mode->vsync_end - mode->vsync_start;
writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned int reg;
+ u32 reg;
struct drm_device *dev = crtc->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
void __iomem *mmio = priv->mmio;
u16 *r, *g, *b;
- unsigned int reg;
- int i;
+ u32 reg;
+ u32 i;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
for (i = 0; i < crtc->gamma_size; i++) {
- unsigned int offset = i << 2;
+ u32 offset = i << 2;
u8 red = *r++ >> 8;
u8 green = *g++ >> 8;
u8 blue = *b++ >> 8;
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
- struct hibmc_drm_private *priv =
- (struct hibmc_drm_private *)dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 status;
status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
/*
* It can operate in one of three modes: 0, 1 or Sleep.
*/
-void hibmc_set_power_mode(struct hibmc_drm_private *priv,
- unsigned int power_mode)
+void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
{
- unsigned int control_value = 0;
+ u32 control_value = 0;
void __iomem *mmio = priv->mmio;
- unsigned int input = 1;
+ u32 input = 1;
if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
return;
void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
{
- unsigned int gate_reg;
- unsigned int mode;
+ u32 gate_reg;
+ u32 mode;
void __iomem *mmio = priv->mmio;
/* Get current power mode. */
static void hibmc_hw_config(struct hibmc_drm_private *priv)
{
- unsigned int reg;
+ u32 reg;
/* On hardware reset, power mode 0 is default. */
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
static int hibmc_unload(struct drm_device *dev)
{
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
drm_atomic_helper_shutdown(dev);
drm_dev_put(dev);
}
-static struct pci_device_id hibmc_pci_table[] = {
+static const struct pci_device_id hibmc_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0x1711) },
{0,}
};
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-struct drm_device;
+struct hibmc_connector {
+ struct drm_connector base;
+
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data bit_data;
+};
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
void __iomem *fb_map;
- unsigned long fb_base;
- unsigned long fb_size;
+ resource_size_t fb_base;
+ resource_size_t fb_size;
/* drm */
struct drm_device *dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct hibmc_connector connector;
bool mode_config_initialized;
};
+static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_connector, base);
+}
+
+static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
- unsigned int power_mode);
+ u32 power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
- unsigned int gate);
+ u32 gate);
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
extern const struct drm_mode_config_funcs hibmc_mode_funcs;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Tian Tao <tiantao6@hisilicon.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+#define GPIO_DATA 0x0802A0
+#define GPIO_DATA_DIRECTION 0x0802A4
+
+#define I2C_SCL_MASK BIT(0)
+#define I2C_SDA_MASK BIT(1)
+
+static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
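+	/*
+	 * The bus lines are handled open-drain style: a logical 1 releases the
+	 * line by clearing the direction bit (input, so the bus pull-up raises
+	 * it), while a logical 0 programs the data bit low and drives the pin
+	 * as an output.
+	 */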
+ if (value) {
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ } else {
+ u32 tmp_data = readl(priv->mmio + GPIO_DATA);
+
+ tmp_data &= ~mask;
+ writel(tmp_data, priv->mmio + GPIO_DATA);
+
+ tmp_dir |= mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+}
+
+static int hibmc_get_i2c_signal(void *data, u32 mask)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
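+	/* If we are not driving the line, switch the pin to input before sampling it. */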
+ if ((tmp_dir & mask) != mask) {
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+
+ return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
+}
+
+static void hibmc_ddc_setsda(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
+}
+
+static void hibmc_ddc_setscl(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
+}
+
+static int hibmc_ddc_getsda(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
+}
+
+static int hibmc_ddc_getscl(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+}
+
+int hibmc_ddc_create(struct drm_device *drm_dev,
+ struct hibmc_connector *connector)
+{
+ connector->adapter.owner = THIS_MODULE;
+ connector->adapter.class = I2C_CLASS_DDC;
+ snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ i2c_set_adapdata(&connector->adapter, connector);
+ connector->adapter.algo_data = &connector->bit_data;
+
+ connector->bit_data.udelay = 20;
+ connector->bit_data.timeout = usecs_to_jiffies(2000);
+ connector->bit_data.data = connector;
+ connector->bit_data.setsda = hibmc_ddc_setsda;
+ connector->bit_data.setscl = hibmc_ddc_setscl;
+ connector->bit_data.getsda = hibmc_ddc_getsda;
+ connector->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&connector->adapter);
+}
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
int count;
+ void *edid;
+ struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+ edid = drm_get_edid(connector, &hibmc_connector->adapter);
+ if (edid) {
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+ if (count)
+ goto out;
+ }
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, 1024, 768);
+out:
+ kfree(edid);
return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
return MODE_OK;
}
+static void hibmc_connector_destroy(struct drm_connector *connector)
+{
+ struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+ i2c_del_adapter(&hibmc_connector->adapter);
+ drm_connector_cleanup(connector);
+}
+
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
static const struct drm_connector_funcs hibmc_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
+ .destroy = hibmc_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
{
u32 reg;
struct drm_device *dev = encoder->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = priv->dev;
+ struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
- struct drm_connector *connector = &priv->connector;
+ struct drm_connector *connector = &hibmc_connector->base;
int ret;
+ ret = hibmc_ddc_create(dev, hibmc_connector);
+ if (ret) {
+ drm_err(dev, "failed to create ddc: %d\n", ret);
+ return ret;
+ }
+
encoder->possible_crtcs = 0x1;
ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &hibmc_connector->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
}
+
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
#endif
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
i915_gem_object_unpin_pages(obj);
}
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ void *vaddr;
- return i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct kmem_cache *slab_objects;
} global;
+static const struct drm_gem_object_funcs i915_gem_object_funcs;
+
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ struct drm_i915_gem_object *obj;
+
+ obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ if (!obj)
+ return NULL;
+ obj->base.funcs = &i915_gem_object_funcs;
+
+ return obj;
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
i915_gem_flush_free_objects(i915);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
return 0;
}
+static const struct drm_gem_object_funcs i915_gem_object_funcs = {
+ .free = i915_gem_free_object,
+ .close = i915_gem_close_object,
+ .export = i915_gem_prime_export,
+};
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
+ struct dma_buf_map map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
goto out_obj;
}
- dma_map = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
err = 0;
out_dma_map:
- dma_buf_vunmap(dmabuf, dma_map);
+ dma_buf_vunmap(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
}
memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
}
i915_gem_object_put(obj);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
memset(ptr, 0xc5, dmabuf->size);
err = 0;
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;
kfree(mock);
}
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
+ void *vaddr;
- return vm_map_ram(mock->pages, mock->npages, 0);
+ vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- vm_unmap_ram(vaddr, mock->npages);
+ vm_unmap_ram(map->vaddr, mock->npages);
}
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
- .gem_close_object = i915_gem_close_object,
- .gem_free_object_unlocked = i915_gem_free_object,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
.name = "mock",
.driver_features = DRIVER_GEM,
.release = mock_device_release,
-
- .gem_close_object = i915_gem_close_object,
- .gem_free_object_unlocked = i915_gem_free_object,
};
static void release_dev(struct device *dev)
* Copyright 2019 NXP.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <linux/platform_device.h>
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
static struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
- u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
if (!fb || !state->crtc || !state->visible)
return;
- pixel_format = state->fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
}
static void ipu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
}
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
u32 addr;
u32 id;
u32 cmd;
-} __packed;
+} __aligned(16);
+
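+/*
+ * All DMA hardware descriptors (foreground 0, foreground 1 and the palette)
+ * live in one coherent allocation, so each descriptor's bus address can be
+ * computed from the base address plus its offsetof() within this struct.
+ */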
+struct ingenic_dma_hwdescs {
+ struct ingenic_dma_hwdesc hwdesc_f0;
+ struct ingenic_dma_hwdesc hwdesc_f1;
+ struct ingenic_dma_hwdesc hwdesc_pal;
+ u16 palette[256] __aligned(16);
+};
struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
unsigned int max_width, max_height;
+ const u32 *formats_f0, *formats_f1;
+ unsigned int num_formats_f0, num_formats_f1;
};
struct ingenic_drm {
struct clk *lcd_clk, *pix_clk;
const struct jz_soc_info *soc_info;
- struct ingenic_dma_hwdesc *dma_hwdesc_f0, *dma_hwdesc_f1;
- dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
+ struct ingenic_dma_hwdescs *dma_hwdescs;
+ dma_addr_t dma_hwdescs_phys;
bool panel_is_sharp;
bool no_vblank;
-};
-static const u32 ingenic_drm_primary_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
+ /*
+ * clk_mutex is used to synchronize the pixel clock rate update with
+ * the VBLANK. When the pixel clock's parent clock needs to be updated,
+ * clock_nb's notifier function will lock the mutex, then wait until the
+ * next VBLANK. At that point, the parent clock's rate can be updated,
+ * and the mutex is then unlocked. If an atomic commit happens in the
+ * meantime, it will block on the mutex and effectively wait until the
+ * clock update finishes. The pixel clock's rate is then recomputed once
+ * the mutex has been released, either in the pending atomic commit or in
+ * a future one.
+ */
+ struct mutex clk_mutex;
+ bool update_clk_rate;
+ struct notifier_block clock_nb;
};
static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
return container_of(crtc, struct ingenic_drm, crtc);
}
+static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
+{
+ return container_of(nb, struct ingenic_drm, clock_nb);
+}
+
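+/*
+ * Parent clock notifier: on PRE_RATE_CHANGE, take clk_mutex and wait for one
+ * VBLANK so the rate switch happens during blanking; any other notification
+ * (post/abort) releases the mutex so a pending commit can recompute the
+ * pixel clock rate.
+ */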
+static int ingenic_drm_update_pixclk(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct ingenic_drm *priv = drm_nb_get_priv(nb);
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&priv->clk_mutex);
+ priv->update_clk_rate = true;
+ drm_crtc_wait_one_vblank(&priv->crtc);
+ return NOTIFY_OK;
+ default:
+ mutex_unlock(&priv->clk_mutex);
+ return NOTIFY_OK;
+ }
+}
+
static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
}
static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
unsigned int var;
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
+ if (state->gamma_lut &&
+ drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
+ dev_dbg(priv->dev, "Invalid palette size\n");
+ return -EINVAL;
+ }
+
if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
if (IS_ERR(f1_state))
if (drm_atomic_crtc_needs_modeset(state)) {
ingenic_drm_crtc_update_timings(priv, &state->mode);
+ priv->update_clk_rate = true;
+ }
+ if (priv->update_clk_rate) {
+ mutex_lock(&priv->clk_mutex);
clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+ priv->update_clk_rate = false;
+ mutex_unlock(&priv->clk_mutex);
}
if (event) {
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_OSDCTRL_BPP_15_16;
break;
+ case DRM_FORMAT_RGB888:
+ ctrl |= JZ_LCD_OSDCTRL_BPP_24_COMP;
+ break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_OSDCTRL_BPP_18_24;
break;
+ case DRM_FORMAT_XRGB2101010:
+ ctrl |= JZ_LCD_OSDCTRL_BPP_30;
+ break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
JZ_LCD_OSDCTRL_BPP_MASK, ctrl);
} else {
switch (fourcc) {
+ case DRM_FORMAT_C8:
+ ctrl |= JZ_LCD_CTRL_BPP_8;
+ break;
case DRM_FORMAT_XRGB1555:
ctrl |= JZ_LCD_CTRL_RGB555;
fallthrough;
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_CTRL_BPP_15_16;
break;
+ case DRM_FORMAT_RGB888:
+ ctrl |= JZ_LCD_CTRL_BPP_24_COMP;
+ break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_CTRL_BPP_18_24;
break;
+ case DRM_FORMAT_XRGB2101010:
+ ctrl |= JZ_LCD_CTRL_BPP_30;
+ break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
}
}
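+/*
+ * Pack the 256-entry gamma LUT into the hardware palette as RGB565; the
+ * palette lives in the DMA descriptor block and is fetched through the
+ * palette descriptor.
+ */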
+static void ingenic_drm_update_palette(struct ingenic_drm *priv,
+ const struct drm_color_lut *lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dma_hwdescs->palette); i++) {
+ u16 color = drm_color_lut_extract(lut[i].red, 5) << 11
+ | drm_color_lut_extract(lut[i].green, 6) << 5
+ | drm_color_lut_extract(lut[i].blue, 5);
+
+ priv->dma_hwdescs->palette[i] = color;
+ }
+}
+
static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *oldstate)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
- unsigned int width, height, cpp;
+ unsigned int width, height, cpp, offset;
dma_addr_t addr;
+ u32 fourcc;
if (state && state->fb) {
+ crtc_state = state->crtc->state;
+
addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
width = state->src_w >> 16;
height = state->src_h >> 16;
cpp = state->fb->format->cpp[0];
if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
- hwdesc = priv->dma_hwdesc_f0;
+ hwdesc = &priv->dma_hwdescs->hwdesc_f0;
else
- hwdesc = priv->dma_hwdesc_f1;
+ hwdesc = &priv->dma_hwdescs->hwdesc_f1;
hwdesc->addr = addr;
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
- if (drm_atomic_crtc_needs_modeset(state->crtc->state))
- ingenic_drm_plane_config(priv->dev, plane,
- state->fb->format->format);
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ fourcc = state->fb->format->format;
+
+ ingenic_drm_plane_config(priv->dev, plane, fourcc);
+
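+ /*
+ * For the C8 (palette) format, chain foreground 0's descriptor through
+ * the palette descriptor so the palette is reloaded before each frame;
+ * otherwise let the descriptor loop on itself.
+ */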
+ if (fourcc == DRM_FORMAT_C8)
+ offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal);
+ else
+ offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+
+ priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset;
+
+ crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8;
+ }
+
+ if (crtc_state->color_mgmt_changed)
+ ingenic_drm_update_palette(priv, crtc_state->gamma_lut->data);
}
}
component_unbind_all(priv->dev, &priv->drm);
}
+static void __maybe_unused ingenic_drm_release_rmem(void *d)
+{
+ of_reserved_mem_device_release(d);
+}
+
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
void __iomem *base;
long parent_rate;
unsigned int i, clone_mask = 0;
+ dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
int ret, irq;
soc_info = of_device_get_match_data(dev);
return -EINVAL;
}
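+ /*
+ * Use the optional reserved memory region from the device tree for DMA
+ * allocations when present; the devm action releases it automatically on
+ * driver detach.
+ */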
+ if (IS_ENABLED(CONFIG_OF_RESERVED_MEM)) {
+ ret = of_reserved_mem_device_init(dev);
+
+ if (ret && ret != -ENODEV)
+ dev_warn(dev, "Failed to get reserved memory: %d\n", ret);
+
+ if (!ret) {
+ ret = devm_add_action_or_reset(dev, ingenic_drm_release_rmem, dev);
+ if (ret)
+ return ret;
+ }
+ }
+
priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
struct ingenic_drm, drm);
if (IS_ERR(priv))
return PTR_ERR(priv->pix_clk);
}
- priv->dma_hwdesc_f1 = dmam_alloc_coherent(dev, sizeof(*priv->dma_hwdesc_f1),
- &priv->dma_hwdesc_phys_f1,
- GFP_KERNEL);
- if (!priv->dma_hwdesc_f1)
+ priv->dma_hwdescs = dmam_alloc_coherent(dev,
+ sizeof(*priv->dma_hwdescs),
+ &priv->dma_hwdescs_phys,
+ GFP_KERNEL);
+ if (!priv->dma_hwdescs)
return -ENOMEM;
- priv->dma_hwdesc_f1->next = priv->dma_hwdesc_phys_f1;
- priv->dma_hwdesc_f1->id = 0xf1;
- if (priv->soc_info->has_osd) {
- priv->dma_hwdesc_f0 = dmam_alloc_coherent(dev,
- sizeof(*priv->dma_hwdesc_f0),
- &priv->dma_hwdesc_phys_f0,
- GFP_KERNEL);
- if (!priv->dma_hwdesc_f0)
- return -ENOMEM;
+ /* Configure DMA hwdesc for foreground0 plane */
+ dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+ priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0;
+ priv->dma_hwdescs->hwdesc_f0.id = 0xf0;
- priv->dma_hwdesc_f0->next = priv->dma_hwdesc_phys_f0;
- priv->dma_hwdesc_f0->id = 0xf0;
- }
+ /* Configure DMA hwdesc for foreground1 plane */
+ dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1);
+ priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1;
+ priv->dma_hwdescs->hwdesc_f1.id = 0xf1;
+
+ /* Configure DMA hwdesc for palette */
+ priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+ priv->dma_hwdescs->hwdesc_pal.id = 0xc0;
+ priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, palette);
+ priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL
+ | (sizeof(priv->dma_hwdescs->palette) / 4);
if (soc_info->has_osd)
priv->ipu_plane = drm_plane_from_index(drm, 0);
ret = drm_universal_plane_init(drm, &priv->f1, 1,
&ingenic_drm_primary_plane_funcs,
- ingenic_drm_primary_formats,
- ARRAY_SIZE(ingenic_drm_primary_formats),
+ priv->soc_info->formats_f1,
+ priv->soc_info->num_formats_f1,
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev, "Failed to register plane: %i\n", ret);
return ret;
}
+ drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
+ ARRAY_SIZE(priv->dma_hwdescs->palette));
+
if (soc_info->has_osd) {
drm_plane_helper_add(&priv->f0,
&ingenic_drm_plane_helper_funcs);
ret = drm_universal_plane_init(drm, &priv->f0, 1,
&ingenic_drm_primary_plane_funcs,
- ingenic_drm_primary_formats,
- ARRAY_SIZE(ingenic_drm_primary_formats),
+ priv->soc_info->formats_f0,
+ priv->soc_info->num_formats_f0,
NULL, DRM_PLANE_TYPE_OVERLAY,
NULL);
if (ret) {
}
/* Set address of our DMA descriptor chain */
- regmap_write(priv->map, JZ_REG_LCD_DA0, priv->dma_hwdesc_phys_f0);
- regmap_write(priv->map, JZ_REG_LCD_DA1, priv->dma_hwdesc_phys_f1);
+ regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0);
+ regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1);
/* Enable OSD if available */
if (soc_info->has_osd)
regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
+ mutex_init(&priv->clk_mutex);
+ priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
+
+ parent_clk = clk_get_parent(priv->pix_clk);
+ ret = clk_notifier_register(parent_clk, &priv->clock_nb);
+ if (ret) {
+ dev_err(dev, "Unable to register clock notifier\n");
+ goto err_devclk_disable;
+ }
+
ret = drm_dev_register(drm, 0);
if (ret) {
dev_err(dev, "Failed to register DRM driver\n");
- goto err_devclk_disable;
+ goto err_clk_notifier_unregister;
}
drm_fbdev_generic_setup(drm, 32);
return 0;
+err_clk_notifier_unregister:
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
static void ingenic_drm_unbind(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
+ struct clk *parent_clk = clk_get_parent(priv->pix_clk);
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
clk_disable_unprepare(priv->pix_clk);
return 0;
}
+static const u32 jz4740_formats[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f1[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f0[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4770_formats_f1[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB2101010,
+};
+
+static const u32 jz4770_formats_f0[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB2101010,
+};
+
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
.has_osd = false,
.max_width = 800,
.max_height = 600,
+ .formats_f1 = jz4740_formats,
+ .num_formats_f1 = ARRAY_SIZE(jz4740_formats),
+ /* JZ4740 has only one plane */
};
static const struct jz_soc_info jz4725b_soc_info = {
.has_osd = true,
.max_width = 800,
.max_height = 600,
+ .formats_f1 = jz4725b_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1),
+ .formats_f0 = jz4725b_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
};
static const struct jz_soc_info jz4770_soc_info = {
.has_osd = true,
.max_width = 1280,
.max_height = 720,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct of_device_id ingenic_drm_of_match[] = {
#define JZ_LCD_CTRL_BPP_8 0x3
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
+#define JZ_LCD_CTRL_BPP_24_COMP 0x6
+#define JZ_LCD_CTRL_BPP_30 0x7
#define JZ_LCD_CTRL_BPP_MASK (JZ_LCD_CTRL_RGB555 | 0x7)
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
#define JZ_LCD_OSDCTRL_CHANGE BIT(3)
#define JZ_LCD_OSDCTRL_BPP_15_16 0x4
#define JZ_LCD_OSDCTRL_BPP_18_24 0x5
+#define JZ_LCD_OSDCTRL_BPP_24_COMP 0x6
#define JZ_LCD_OSDCTRL_BPP_30 0x7
#define JZ_LCD_OSDCTRL_BPP_MASK (JZ_LCD_OSDCTRL_RGB555 | 0x7)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
}
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = mtk_drm_gem_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = mtk_drm_gem_prime_import,
- .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
- .gem_prime_vmap = mtk_drm_gem_prime_vmap,
- .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
+static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
+ .free = mtk_drm_gem_free_object,
+ .get_sg_table = mtk_gem_prime_get_sg_table,
+ .vmap = mtk_drm_gem_prime_vmap,
+ .vunmap = mtk_drm_gem_prime_vunmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
+ mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
};
static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
}
static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
}
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
#include <linux/ktime.h>
#include <linux/bits.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
}
static void dpu_crtc_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
}
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
}
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
}
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
}
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
-static const struct vm_operations_struct vm_ops = {
- .fault = msm_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .gem_free_object_unlocked = msm_gem_free_object,
- .gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = msm_gem_prime_pin,
- .gem_prime_unpin = msm_gem_prime_unpin,
- .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_vmap = msm_gem_prime_vmap,
- .gem_prime_vunmap = msm_gem_prime_vunmap,
.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
return ret;
}
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+ .free = msm_gem_free_object,
+ .pin = msm_gem_prime_pin,
+ .unpin = msm_gem_prime_unpin,
+ .get_sg_table = msm_gem_prime_get_sg_table,
+ .vmap = msm_gem_prime_vmap,
+ .vunmap = msm_gem_prime_vunmap,
+ .vm_ops = &vm_ops,
+};
+
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
+ (*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
}
static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
struct drm_device *drm = mxsfb->drm;
}
static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
struct drm_device *drm = mxsfb->drm;
*/
#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
#include "nouveau_drv.h"
#include "nouveau_chan.h"
static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg);
+static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/*
* NV10-NV40 tiling helpers
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- WARN_ON(nvbo->pin_refcnt > 0);
+ WARN_ON(nvbo->bo.pin_count > 0);
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
}
static void
-set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
- uint32_t domain, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
*n = 0;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- struct nvif_mmu *mmu = &drm->client.mmu;
- const u8 type = mmu->type[drm->ttm.type_vram].type;
-
pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
-
- /* Some BARs do not support being ioremapped WC */
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- type & NVIF_MEM_UNCACHED)
- pl[*n].flags &= ~TTM_PL_FLAG_WC;
-
+ pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_GART) {
pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = flags;
-
- if (drm->agp.bridge)
- pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
-
+ pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = flags;
+ pl[(*n)++].flags = 0;
}
}
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_placement *pl = &nvbo->placement;
- uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
- TTM_PL_MASK_CACHING) |
- (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
- set_placement_list(drm, nvbo->placements, &pl->num_placement,
- domain, flags);
+ set_placement_list(nvbo->placements, &pl->num_placement, domain);
pl->busy_placement = nvbo->busy_placements;
- set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy, flags);
+ set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+ domain | busy);
set_placement_range(nvbo, domain);
}
}
}
- if (nvbo->pin_refcnt) {
+ if (nvbo->bo.pin_count) {
bool error = evict;
switch (bo->mem.mem_type) {
bo->mem.mem_type, domain);
ret = -EBUSY;
}
- nvbo->pin_refcnt++;
+ ttm_bo_pin(&nvbo->bo);
goto out;
}
goto out;
}
- nvbo->pin_refcnt++;
nouveau_bo_placement_set(nvbo, domain, 0);
-
- /* drop pin_refcnt temporarily, so we don't trip the assertion
- * in nouveau_bo_move() that makes sure we're not trying to
- * move a pinned buffer
- */
- nvbo->pin_refcnt--;
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
- nvbo->pin_refcnt++;
+
+ ttm_bo_pin(&nvbo->bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret, ref;
+ int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
- ref = --nvbo->pin_refcnt;
- WARN_ON_ONCE(ref < 0);
- if (ref)
- goto out;
-
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
- break;
- case TTM_PL_TT:
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
- break;
- default:
- break;
- }
-
- ret = nouveau_bo_validate(nvbo, false, false);
- if (ret == 0) {
+ ttm_bo_unpin(&nvbo->bo);
+ if (!nvbo->bo.pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->mem.size;
}
}
-out:
ttm_bo_unreserve(bo);
- return ret;
+ return 0;
}
int
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
if (!ttm_dma)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; i++)
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
PAGE_SIZE, DMA_TO_DEVICE);
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
if (!ttm_dma)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; i++)
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_FROM_DEVICE);
}
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
}
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
- ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
+ ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
if (ret == 0) {
}
static int
-nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
- struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_TT,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
if (ret)
return ret;
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
goto out;
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
+ ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, &tmp_reg);
out:
ttm_resource_free(bo, &tmp_reg);
return ret;
}
static int
-nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
- struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_TT,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
- if (ret)
- goto out;
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(ret != 0))
+ return ret;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm_bo_assign_mem(bo, &tmp_reg);
+ ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg);
if (ret)
goto out;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ if (new_reg->mem_type == TTM_PL_TT) {
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
+ if (ret)
+ return ret;
+ }
+
+ nouveau_bo_move_ntfy(bo, evict, new_reg);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
- return ret;
+ goto out_ntfy;
- if (nvbo->pin_refcnt)
+ if (nvbo->bo.pin_count)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
- return ret;
+ goto out_ntfy;
}
/* Fake bo copy. */
goto out;
}
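+ /*
+ * SYSTEM <-> TT transitions only need the TT backing store to be
+ * (un)bound; no data has to be copied, so handle them without a blit.
+ */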
+ if (old_reg->mem_type == TTM_PL_SYSTEM &&
+ new_reg->mem_type == TTM_PL_TT) {
+ ttm_bo_move_null(bo, new_reg);
+ goto out;
+ }
+
+ if (old_reg->mem_type == TTM_PL_TT &&
+ new_reg->mem_type == TTM_PL_SYSTEM) {
+ nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_reg);
+ goto out;
+ }
+
/* Hardware assisted copy. */
if (drm->ttm.move) {
if (new_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flipd(bo, evict, ctx,
+ new_reg);
else if (old_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flips(bo, evict, ctx,
+ new_reg);
else
- ret = nouveau_bo_move_m2mf(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_m2mf(bo, evict, ctx,
+ new_reg);
if (!ret)
goto out;
}
/* Fallback to software copy. */
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
else
nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
}
-
+out_ntfy:
+ if (ret) {
+ swap(*new_reg, bo->mem);
+ nouveau_bo_move_ntfy(bo, false, new_reg);
+ swap(*new_reg, bo->mem);
+ }
return ret;
}
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ const u8 type = mmu->type[drm->ttm.type_vram].type;
int ret;
mutex_lock(&drm->ttm.io_reserve_mutex);
reg->bus.offset = (reg->start << PAGE_SHIFT) +
drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
+ reg->bus.caching = ttm_write_combined;
}
#endif
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
reg->bus.offset = (reg->start << PAGE_SHIFT) +
device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
+
+ /* Some BARs do not support being ioremapped WC */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ type & NVIF_MEM_UNCACHED)
+ reg->bus.caching = ttm_uncached;
+ else
+ reg->bus.caching = ttm_write_combined;
+
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
union {
struct nv50_mem_map_v0 nv50;
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
!nvbo->kind)
return 0;
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
- 0);
+ if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ return 0;
+
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+
+ } else {
+ /* make sure bo is in mappable vram */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+ bo->mem.start + bo->mem.num_pages < mappable)
+ return 0;
- ret = nouveau_bo_validate(nvbo, false, false);
- if (ret)
- return ret;
+ for (i = 0; i < nvbo->placement.num_placement; ++i) {
+ nvbo->placements[i].fpfn = 0;
+ nvbo->placements[i].lpfn = mappable;
}
- return 0;
- }
- /* make sure bo is in mappable vram */
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
- return 0;
+ for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+ nvbo->busy_placements[i].fpfn = 0;
+ nvbo->busy_placements[i].lpfn = mappable;
+ }
- for (i = 0; i < nvbo->placement.num_placement; ++i) {
- nvbo->placements[i].fpfn = 0;
- nvbo->placements[i].lpfn = mappable;
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(ret))
+ return VM_FAULT_SIGBUS;
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, false);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ return 0;
}
static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* make userspace faulting work */
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
ttm_dma->dma_address, ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
-#if IS_ENABLED(CONFIG_AGP)
- if (drm->agp.bridge) {
- return ttm_pool_populate(ttm, ctx);
- }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev, ctx);
- }
-#endif
- return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
+ return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
-#if IS_ENABLED(CONFIG_AGP)
- if (drm->agp.bridge) {
- ttm_pool_unpopulate(ttm);
- return;
- }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev);
- return;
- }
-#endif
-
- ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+ return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}
static void
dma_resv_add_shared_fence(resv, &fence->base);
}
+static void
+nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ nouveau_bo_move_ntfy(bo, false, NULL);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .ttm_tt_bind = &nouveau_ttm_tt_bind,
- .ttm_tt_unbind = &nouveau_ttm_tt_unbind,
.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
- .move_notify = nouveau_bo_move_ntfy,
+ .delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
struct nouveau_drm_tile *tile;
- /* protect by the ttm reservation lock */
- int pin_refcnt;
-
struct ttm_bo_kmap_obj dma_buf_vmap;
};
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct ttm_resource_manager *man;
int ret;
nouveau_svm_suspend(drm);
}
NV_DEBUG(drm, "evicting buffers...\n");
- ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+ man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = nouveau_gem_prime_pin,
- .gem_prime_unpin = nouveau_gem_prime_unpin,
- .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
- .gem_prime_vmap = nouveau_gem_prime_vmap,
- .gem_prime_vunmap = nouveau_gem_prime_vunmap,
-
- .gem_free_object_unlocked = nouveau_gem_object_del,
- .gem_open_object = nouveau_gem_object_open,
- .gem_close_object = nouveau_gem_object_close,
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drm_audio_component.h>
ttm_bo_unreserve(&nvbo->bo);
}
+const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
+ .free = nouveau_gem_object_del,
+ .open = nouveau_gem_object_open,
+ .close = nouveau_gem_object_close,
+ .pin = nouveau_gem_prime_pin,
+ .unpin = nouveau_gem_prime_unpin,
+ .get_sg_table = nouveau_gem_prime_get_sg_table,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
+};
+
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
+ nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
- nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
*pnvbo = nvbo;
return 0;
}
#include "nouveau_drv.h"
#include "nouveau_bo.h"
+extern const struct drm_gem_object_funcs nouveau_gem_object_funcs;
+
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
}
int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
mem->comp = 0;
}
- if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
- else args.dma = tt->dma_address;
+ if (tt->sg)
+ args.sgl = tt->sg->sgl;
+ else
+ args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
cli->base.super = true;
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
#include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
struct ttm_resource *);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
#endif
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+ nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
+#include "nouveau_bo.h"
struct nouveau_sgdma_be {
/* this has to be the first field so populate/unpopulated in
* nouve_bo.c works properly, otherwise have to move them here
*/
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
struct nouveau_mem *mem;
};
if (ttm) {
nouveau_sgdma_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
- ttm_dma_tt_fini(&nvbe->ttm);
+ ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_sgdma_be *nvbe;
+ enum ttm_caching caching;
+
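+ /*
+ * Pick the TTM caching mode for the backing pages: coherent BOs must be
+ * uncached, AGP apertures are mapped write-combined, everything else
+ * stays cached.
+ */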
+ if (nvbo->force_coherent)
+ caching = ttm_uncached;
+ else if (drm->agp.bridge)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
- return &nvbe->ttm.ttm;
+ return &nvbe->ttm;
}
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+#include <linux/swiotlb.h>
+
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
return ret;
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
nouveau_mem_del(reg);
return ret;
if (ret)
return ret;
- nouveau_bo_del_io_reserve_lru(bo);
+ ret = nouveau_ttm_fault_reserve_notify(bo);
+ if (ret)
+ goto error_unlock;
+ nouveau_bo_del_io_reserve_lru(bo);
prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ nouveau_bo_add_io_reserve_lru(bo);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
- nouveau_bo_add_io_reserve_lru(bo);
-
+error_unlock:
dma_resv_unlock(bo->base.resv);
-
return ret;
}
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
kfree(man);
ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
else {
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
kfree(man);
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
+ bool need_swiotlb = false;
int typei, ret;
ret = nouveau_ttm_init_host(drm, 0);
drm->agp.cma = pci->agp.cma;
}
- ret = ttm_bo_device_init(&drm->ttm.bdev,
- &nouveau_bo_driver,
- dev->anon_inode->i_mapping,
- dev->vma_offset_manager,
- drm->client.mmu.dmabits <= 32 ? true : false);
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
+ need_swiotlb = !!swiotlb_nr_tbl();
+#endif
+
+ ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
+ drm->dev->dev, dev->anon_inode->i_mapping,
+ dev->vma_offset_manager, need_swiotlb,
+ drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
}
static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
}
static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
return 0;
}
-static const struct vm_operations_struct omap_gem_vm_ops = {
- .fault = omap_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations omapdriver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_free_object_unlocked = omap_gem_free_object,
- .gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
.ioctls = ioctls,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-vm_fault_t omap_gem_fault(struct vm_fault *vmf)
+static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
* Constructor & Destructor
*/
-void omap_gem_free_object(struct drm_gem_object *obj)
+static void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct omap_drm_private *priv = dev->dev_private;
return true;
}
+static const struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs omap_gem_object_funcs = {
+ .free = omap_gem_free_object,
+ .export = omap_gem_prime_export,
+ .vm_ops = &omap_gem_vm_ops,
+};
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
size = PAGE_ALIGN(gsize.bytes);
}
+ obj->funcs = &omap_gem_object_funcs;
+
/* Initialize the GEM object. */
if (!(flags & OMAP_BO_MEM_SHMEM)) {
drm_gem_private_object_init(dev, obj, size);
struct sg_table *sgt);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, u32 flags, u32 *handle);
-void omap_gem_free_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
/* Dumb Buffers Interface */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-vm_fault_t omap_gem_fault(struct vm_fault *vmf);
int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
around the Novatek NT35510 display controller, such as some
Hydis panels.
+config DRM_PANEL_NOVATEK_NT36672A
+ tristate "Novatek NT36672A DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+	  around the Novatek NT36672A display controller, such as the
+	  Tianma panels used in some variants of the Xiaomi Poco F1 phone.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
Say Y here if you want to enable support for the Sony ACX565AKM
800x600 3.5" panel (found on the Nokia N900).
+config DRM_PANEL_TDO_TL070WSH30
+ tristate "TDO TL070WSH30 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the TDO TL070WSH30
+	  TFT-LCD panel module. The panel has a 1024×600 resolution, uses
+	  24-bit RGB per pixel, and provides a MIPI DSI interface to the
+	  host, a built-in LED backlight and a touch controller.
+
config DRM_PANEL_TPO_TD028TTEC1
tristate "Toppoly (TPO) TD028TTEC1 panel driver"
depends on OF && SPI
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * This driver is for the DSI interface to panels using the NT36672A display driver IC
+ * from Novatek.
+ * Currently supported are the Tianma FHD+ panels found in some Xiaomi phones, including
+ * some variants of the Poco F1 phone.
+ *
+ * Panels using the Novatek NT36672A IC should add their per-panel configuration and
+ * use this driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct nt36672a_panel_cmd {
+ const char data[2];
+};
+
+static const char * const nt36672a_regulator_names[] = {
+ "vddio",
+ "vddpos",
+ "vddneg",
+};
+
+static unsigned long const nt36672a_regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000
+};
+
+struct nt36672a_panel_desc {
+ const struct drm_display_mode *display_mode;
+ const char *panel_name;
+
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+
+ unsigned int num_on_cmds_1;
+ const struct nt36672a_panel_cmd *on_cmds_1;
+ unsigned int num_on_cmds_2;
+ const struct nt36672a_panel_cmd *on_cmds_2;
+
+ unsigned int num_off_cmds;
+ const struct nt36672a_panel_cmd *off_cmds;
+};
+
+struct nt36672a_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+ const struct nt36672a_panel_desc *desc;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(nt36672a_regulator_names)];
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct nt36672a_panel *to_nt36672a_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt36672a_panel, base);
+}
+
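+/* Send a table of DCS writes; each entry is a one-byte command (data[0]) with a one-byte payload (data[1]). */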
+static int nt36672a_send_cmds(struct drm_panel *panel, const struct nt36672a_panel_cmd *cmds,
+ int num)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ const struct nt36672a_panel_cmd *cmd = &cmds[i];
+
+ err = mipi_dsi_dcs_write(pinfo->link, cmd->data[0], cmd->data + 1, 1);
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt36672a_panel_power_off(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int ret = 0;
+
+ gpiod_set_value(pinfo->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+ if (ret)
+ dev_err(panel->dev, "regulator_bulk_disable failed %d\n", ret);
+
+ return ret;
+}
+
+static int nt36672a_panel_unprepare(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int ret;
+
+ if (!pinfo->prepared)
+ return 0;
+
+ /* send off cmds */
+ ret = nt36672a_send_cmds(panel, pinfo->desc->off_cmds,
+ pinfo->desc->num_off_cmds);
+
+ if (ret < 0)
+ dev_err(panel->dev, "failed to send DCS off cmds: %d\n", ret);
+
+ ret = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (ret < 0)
+ dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+ if (ret < 0)
+ dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);
+
+ /* 0x3C = 60ms delay */
+ msleep(60);
+
+ ret = nt36672a_panel_power_off(panel);
+ if (ret < 0)
+ dev_err(panel->dev, "power_off failed ret = %d\n", ret);
+
+ pinfo->prepared = false;
+
+ return ret;
+}
+
+static int nt36672a_panel_power_on(struct nt36672a_panel *pinfo)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * As per the downstream kernel, the reset sequence of the Tianma FHD panel requires
+	 * the panel to be out of reset for 10 ms, followed by being held in reset for 10 ms.
+	 * For Android (AOSP) we needed to bump this up to 200 ms, otherwise we sometimes see
+	 * a white screen.
+	 * FIXME: Try to reduce this 200 ms to a smaller value.
+ */
+ gpiod_set_value(pinfo->reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value(pinfo->reset_gpio, 0);
+ msleep(200);
+
+ return 0;
+}
+
+static int nt36672a_panel_prepare(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int err;
+
+ if (pinfo->prepared)
+ return 0;
+
+ err = nt36672a_panel_power_on(pinfo);
+ if (err < 0)
+ goto poweroff;
+
+ /* send first part of init cmds */
+ err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_1,
+ pinfo->desc->num_on_cmds_1);
+
+ if (err < 0) {
+ dev_err(panel->dev, "failed to send DCS Init 1st Code: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /* 0x46 = 70 ms delay */
+ msleep(70);
+
+ err = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (err < 0) {
+		dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ /* Send rest of the init cmds */
+ err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_2,
+ pinfo->desc->num_on_cmds_2);
+
+ if (err < 0) {
+ dev_err(panel->dev, "failed to send DCS Init 2nd Code: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ pinfo->prepared = true;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value(pinfo->reset_gpio, 0);
+ return err;
+}
+
+static int nt36672a_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ const struct drm_display_mode *m = pinfo->desc->display_mode;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n", m->hdisplay,
+ m->vdisplay, drm_mode_vrefresh(m));
+ return -ENOMEM;
+ }
+
+ connector->display_info.width_mm = pinfo->desc->width_mm;
+ connector->display_info.height_mm = pinfo->desc->height_mm;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+ .unprepare = nt36672a_panel_unprepare,
+ .prepare = nt36672a_panel_prepare,
+ .get_modes = nt36672a_panel_get_modes,
+};
+
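+/* First part of the init sequence, sent before the exit_sleep_mode command. */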
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_1[] = {
+ /* skin enhancement mode */
+ { .data = {0xFF, 0x22} },
+ { .data = {0x00, 0x40} },
+ { .data = {0x01, 0xC0} },
+ { .data = {0x02, 0x40} },
+ { .data = {0x03, 0x40} },
+ { .data = {0x04, 0x40} },
+ { .data = {0x05, 0x40} },
+ { .data = {0x06, 0x40} },
+ { .data = {0x07, 0x40} },
+ { .data = {0x08, 0x40} },
+ { .data = {0x09, 0x40} },
+ { .data = {0x0A, 0x40} },
+ { .data = {0x0B, 0x40} },
+ { .data = {0x0C, 0x40} },
+ { .data = {0x0D, 0x40} },
+ { .data = {0x0E, 0x40} },
+ { .data = {0x0F, 0x40} },
+ { .data = {0x10, 0x40} },
+ { .data = {0x11, 0x50} },
+ { .data = {0x12, 0x60} },
+ { .data = {0x13, 0x70} },
+ { .data = {0x14, 0x58} },
+ { .data = {0x15, 0x68} },
+ { .data = {0x16, 0x78} },
+ { .data = {0x17, 0x77} },
+ { .data = {0x18, 0x39} },
+ { .data = {0x19, 0x2D} },
+ { .data = {0x1A, 0x2E} },
+ { .data = {0x1B, 0x32} },
+ { .data = {0x1C, 0x37} },
+ { .data = {0x1D, 0x3A} },
+ { .data = {0x1E, 0x40} },
+ { .data = {0x1F, 0x40} },
+ { .data = {0x20, 0x40} },
+ { .data = {0x21, 0x40} },
+ { .data = {0x22, 0x40} },
+ { .data = {0x23, 0x40} },
+ { .data = {0x24, 0x40} },
+ { .data = {0x25, 0x40} },
+ { .data = {0x26, 0x40} },
+ { .data = {0x27, 0x40} },
+ { .data = {0x28, 0x40} },
+ { .data = {0x2D, 0x00} },
+ { .data = {0x2F, 0x40} },
+ { .data = {0x30, 0x40} },
+ { .data = {0x31, 0x40} },
+ { .data = {0x32, 0x40} },
+ { .data = {0x33, 0x40} },
+ { .data = {0x34, 0x40} },
+ { .data = {0x35, 0x40} },
+ { .data = {0x36, 0x40} },
+ { .data = {0x37, 0x40} },
+ { .data = {0x38, 0x40} },
+ { .data = {0x39, 0x40} },
+ { .data = {0x3A, 0x40} },
+ { .data = {0x3B, 0x40} },
+ { .data = {0x3D, 0x40} },
+ { .data = {0x3F, 0x40} },
+ { .data = {0x40, 0x40} },
+ { .data = {0x41, 0x40} },
+ { .data = {0x42, 0x40} },
+ { .data = {0x43, 0x40} },
+ { .data = {0x44, 0x40} },
+ { .data = {0x45, 0x40} },
+ { .data = {0x46, 0x40} },
+ { .data = {0x47, 0x40} },
+ { .data = {0x48, 0x40} },
+ { .data = {0x49, 0x40} },
+ { .data = {0x4A, 0x40} },
+ { .data = {0x4B, 0x40} },
+ { .data = {0x4C, 0x40} },
+ { .data = {0x4D, 0x40} },
+ { .data = {0x4E, 0x40} },
+ { .data = {0x4F, 0x40} },
+ { .data = {0x50, 0x40} },
+ { .data = {0x51, 0x40} },
+ { .data = {0x52, 0x40} },
+ { .data = {0x53, 0x01} },
+ { .data = {0x54, 0x01} },
+ { .data = {0x55, 0xFE} },
+ { .data = {0x56, 0x77} },
+ { .data = {0x58, 0xCD} },
+ { .data = {0x59, 0xD0} },
+ { .data = {0x5A, 0xD0} },
+ { .data = {0x5B, 0x50} },
+ { .data = {0x5C, 0x50} },
+ { .data = {0x5D, 0x50} },
+ { .data = {0x5E, 0x50} },
+ { .data = {0x5F, 0x50} },
+ { .data = {0x60, 0x50} },
+ { .data = {0x61, 0x50} },
+ { .data = {0x62, 0x50} },
+ { .data = {0x63, 0x50} },
+ { .data = {0x64, 0x50} },
+ { .data = {0x65, 0x50} },
+ { .data = {0x66, 0x50} },
+ { .data = {0x67, 0x50} },
+ { .data = {0x68, 0x50} },
+ { .data = {0x69, 0x50} },
+ { .data = {0x6A, 0x50} },
+ { .data = {0x6B, 0x50} },
+ { .data = {0x6C, 0x50} },
+ { .data = {0x6D, 0x50} },
+ { .data = {0x6E, 0x50} },
+ { .data = {0x6F, 0x50} },
+ { .data = {0x70, 0x07} },
+ { .data = {0x71, 0x00} },
+ { .data = {0x72, 0x00} },
+ { .data = {0x73, 0x00} },
+ { .data = {0x74, 0x06} },
+ { .data = {0x75, 0x0C} },
+ { .data = {0x76, 0x03} },
+ { .data = {0x77, 0x09} },
+ { .data = {0x78, 0x0F} },
+ { .data = {0x79, 0x68} },
+ { .data = {0x7A, 0x88} },
+ { .data = {0x7C, 0x80} },
+ { .data = {0x7D, 0x80} },
+ { .data = {0x7E, 0x80} },
+ { .data = {0x7F, 0x00} },
+ { .data = {0x80, 0x00} },
+ { .data = {0x81, 0x00} },
+ { .data = {0x83, 0x01} },
+ { .data = {0x84, 0x00} },
+ { .data = {0x85, 0x80} },
+ { .data = {0x86, 0x80} },
+ { .data = {0x87, 0x80} },
+ { .data = {0x88, 0x40} },
+ { .data = {0x89, 0x91} },
+ { .data = {0x8A, 0x98} },
+ { .data = {0x8B, 0x80} },
+ { .data = {0x8C, 0x80} },
+ { .data = {0x8D, 0x80} },
+ { .data = {0x8E, 0x80} },
+ { .data = {0x8F, 0x80} },
+ { .data = {0x90, 0x80} },
+ { .data = {0x91, 0x80} },
+ { .data = {0x92, 0x80} },
+ { .data = {0x93, 0x80} },
+ { .data = {0x94, 0x80} },
+ { .data = {0x95, 0x80} },
+ { .data = {0x96, 0x80} },
+ { .data = {0x97, 0x80} },
+ { .data = {0x98, 0x80} },
+ { .data = {0x99, 0x80} },
+ { .data = {0x9A, 0x80} },
+ { .data = {0x9B, 0x80} },
+ { .data = {0x9C, 0x80} },
+ { .data = {0x9D, 0x80} },
+ { .data = {0x9E, 0x80} },
+ { .data = {0x9F, 0x80} },
+ { .data = {0xA0, 0x8A} },
+ { .data = {0xA2, 0x80} },
+ { .data = {0xA6, 0x80} },
+ { .data = {0xA7, 0x80} },
+ { .data = {0xA9, 0x80} },
+ { .data = {0xAA, 0x80} },
+ { .data = {0xAB, 0x80} },
+ { .data = {0xAC, 0x80} },
+ { .data = {0xAD, 0x80} },
+ { .data = {0xAE, 0x80} },
+ { .data = {0xAF, 0x80} },
+ { .data = {0xB7, 0x76} },
+ { .data = {0xB8, 0x76} },
+ { .data = {0xB9, 0x05} },
+ { .data = {0xBA, 0x0D} },
+ { .data = {0xBB, 0x14} },
+ { .data = {0xBC, 0x0F} },
+ { .data = {0xBD, 0x18} },
+ { .data = {0xBE, 0x1F} },
+ { .data = {0xBF, 0x05} },
+ { .data = {0xC0, 0x0D} },
+ { .data = {0xC1, 0x14} },
+ { .data = {0xC2, 0x03} },
+ { .data = {0xC3, 0x07} },
+ { .data = {0xC4, 0x0A} },
+ { .data = {0xC5, 0xA0} },
+ { .data = {0xC6, 0x55} },
+ { .data = {0xC7, 0xFF} },
+ { .data = {0xC8, 0x39} },
+ { .data = {0xC9, 0x44} },
+ { .data = {0xCA, 0x12} },
+ { .data = {0xCD, 0x80} },
+ { .data = {0xDB, 0x80} },
+ { .data = {0xDC, 0x80} },
+ { .data = {0xDD, 0x80} },
+ { .data = {0xE0, 0x80} },
+ { .data = {0xE1, 0x80} },
+ { .data = {0xE2, 0x80} },
+ { .data = {0xE3, 0x80} },
+ { .data = {0xE4, 0x80} },
+ { .data = {0xE5, 0x40} },
+ { .data = {0xE6, 0x40} },
+ { .data = {0xE7, 0x40} },
+ { .data = {0xE8, 0x40} },
+ { .data = {0xE9, 0x40} },
+ { .data = {0xEA, 0x40} },
+ { .data = {0xEB, 0x40} },
+ { .data = {0xEC, 0x40} },
+ { .data = {0xED, 0x40} },
+ { .data = {0xEE, 0x40} },
+ { .data = {0xEF, 0x40} },
+ { .data = {0xF0, 0x40} },
+ { .data = {0xF1, 0x40} },
+ { .data = {0xF2, 0x40} },
+ { .data = {0xF3, 0x40} },
+ { .data = {0xF4, 0x40} },
+ { .data = {0xF5, 0x40} },
+ { .data = {0xF6, 0x40} },
+ { .data = {0xFB, 0x1} },
+ { .data = {0xFF, 0x23} },
+ { .data = {0xFB, 0x01} },
+ /* dimming enable */
+ { .data = {0x01, 0x84} },
+ { .data = {0x05, 0x2D} },
+ { .data = {0x06, 0x00} },
+ /* resolution 1080*2246 */
+ { .data = {0x11, 0x01} },
+ { .data = {0x12, 0x7B} },
+ { .data = {0x15, 0x6F} },
+ { .data = {0x16, 0x0B} },
+ /* UI mode */
+ { .data = {0x29, 0x0A} },
+ { .data = {0x30, 0xFF} },
+ { .data = {0x31, 0xFF} },
+ { .data = {0x32, 0xFF} },
+ { .data = {0x33, 0xFF} },
+ { .data = {0x34, 0xFF} },
+ { .data = {0x35, 0xFF} },
+ { .data = {0x36, 0xFF} },
+ { .data = {0x37, 0xFF} },
+ { .data = {0x38, 0xFC} },
+ { .data = {0x39, 0xF8} },
+ { .data = {0x3A, 0xF4} },
+ { .data = {0x3B, 0xF1} },
+ { .data = {0x3D, 0xEE} },
+ { .data = {0x3F, 0xEB} },
+ { .data = {0x40, 0xE8} },
+ { .data = {0x41, 0xE5} },
+ /* STILL mode */
+ { .data = {0x2A, 0x13} },
+ { .data = {0x45, 0xFF} },
+ { .data = {0x46, 0xFF} },
+ { .data = {0x47, 0xFF} },
+ { .data = {0x48, 0xFF} },
+ { .data = {0x49, 0xFF} },
+ { .data = {0x4A, 0xFF} },
+ { .data = {0x4B, 0xFF} },
+ { .data = {0x4C, 0xFF} },
+ { .data = {0x4D, 0xED} },
+ { .data = {0x4E, 0xD5} },
+ { .data = {0x4F, 0xBF} },
+ { .data = {0x50, 0xA6} },
+ { .data = {0x51, 0x96} },
+ { .data = {0x52, 0x86} },
+ { .data = {0x53, 0x76} },
+ { .data = {0x54, 0x66} },
+ /* MOVING mode */
+ { .data = {0x2B, 0x0E} },
+ { .data = {0x58, 0xFF} },
+ { .data = {0x59, 0xFF} },
+ { .data = {0x5A, 0xFF} },
+ { .data = {0x5B, 0xFF} },
+ { .data = {0x5C, 0xFF} },
+ { .data = {0x5D, 0xFF} },
+ { .data = {0x5E, 0xFF} },
+ { .data = {0x5F, 0xFF} },
+ { .data = {0x60, 0xF6} },
+ { .data = {0x61, 0xEA} },
+ { .data = {0x62, 0xE1} },
+ { .data = {0x63, 0xD8} },
+ { .data = {0x64, 0xCE} },
+ { .data = {0x65, 0xC3} },
+ { .data = {0x66, 0xBA} },
+ { .data = {0x67, 0xB3} },
+ { .data = {0xFF, 0x25} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x05, 0x04} },
+ { .data = {0xFF, 0x26} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x1C, 0xAF} },
+ { .data = {0xFF, 0x10} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x51, 0xFF} },
+ { .data = {0x53, 0x24} },
+ { .data = {0x55, 0x00} },
+};
+
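+/* Second part of the init sequence, sent after the display has been switched on. */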
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_2[] = {
+ { .data = {0xFF, 0x24} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0xC3, 0x01} },
+ { .data = {0xC4, 0x54} },
+ { .data = {0xFF, 0x10} },
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_off_cmds[] = {
+ { .data = {0xFF, 0x24} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0xC3, 0x01} },
+ { .data = {0xFF, 0x10} },
+};
+
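+/* 1080x2246 video mode, ~60 Hz: 161.331 MHz / (1184 * 2271) */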
+static const struct drm_display_mode tianma_fhd_video_panel_default_mode = {
+ .clock = 161331,
+
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 40,
+ .hsync_end = 1080 + 40 + 20,
+ .htotal = 1080 + 40 + 20 + 44,
+
+ .vdisplay = 2246,
+ .vsync_start = 2246 + 15,
+ .vsync_end = 2246 + 15 + 2,
+ .vtotal = 2246 + 15 + 2 + 8,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct nt36672a_panel_desc tianma_fhd_video_panel_desc = {
+ .display_mode = &tianma_fhd_video_panel_default_mode,
+
+ .width_mm = 68,
+ .height_mm = 136,
+
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds_1 = tianma_fhd_video_on_cmds_1,
+ .num_on_cmds_1 = ARRAY_SIZE(tianma_fhd_video_on_cmds_1),
+ .on_cmds_2 = tianma_fhd_video_on_cmds_2,
+ .num_on_cmds_2 = ARRAY_SIZE(tianma_fhd_video_on_cmds_2),
+ .off_cmds = tianma_fhd_video_off_cmds,
+ .num_off_cmds = ARRAY_SIZE(tianma_fhd_video_off_cmds),
+};
+
+static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
+{
+ struct device *dev = &pinfo->link->dev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
+ pinfo->supplies[i].supply = nt36672a_regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
+ pinfo->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
+ ret = regulator_set_load(pinfo->supplies[i].consumer,
+ nt36672a_regulator_enable_loads[i]);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
+ }
+
+ pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pinfo->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
+ "failed to get reset gpio from DT\n");
+
+ drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ drm_panel_add(&pinfo->base);
+
+ return 0;
+}
+
+static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo;
+ const struct nt36672a_panel_desc *desc;
+ int err;
+
+ pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->mode_flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+ pinfo->desc = desc;
+ pinfo->link = dsi;
+
+ mipi_dsi_set_drvdata(dsi, pinfo);
+
+ err = nt36672a_panel_add(pinfo);
+ if (err < 0)
+ return err;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = drm_panel_unprepare(&pinfo->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
+
+ err = drm_panel_disable(&pinfo->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&pinfo->base);
+
+ return 0;
+}
+
+static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&pinfo->base);
+ drm_panel_unprepare(&pinfo->base);
+}
+
+static const struct of_device_id tianma_fhd_video_of_match[] = {
+ { .compatible = "tianma,fhd-video", .data = &tianma_fhd_video_panel_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tianma_fhd_video_of_match);
+
+static struct mipi_dsi_driver nt36672a_panel_driver = {
+ .driver = {
+ .name = "panel-tianma-nt36672a",
+ .of_match_table = tianma_fhd_video_of_match,
+ },
+ .probe = nt36672a_panel_probe,
+ .remove = nt36672a_panel_remove,
+ .shutdown = nt36672a_panel_shutdown,
+};
+module_mipi_dsi_driver(nt36672a_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_DESCRIPTION("NOVATEK NT36672A based MIPI-DSI LCD panel driver");
+MODULE_LICENSE("GPL");
dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n");
}
-static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
- size_t len)
-{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
- /* data will be sent in dsi hs mode (ie. no lpm) */
- dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
-
- otm8009a_dcs_write_buf(ctx, data, len);
-
- /* restore back the dsi lpm mode */
- dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-}
-
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
return 0;
}
dsi->lanes = 2;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
};
static const struct drm_display_mode default_mode = {
- .clock = 52582,
+ .clock = 54000,
.hdisplay = 720,
- .hsync_start = 720 + 38,
- .hsync_end = 720 + 38 + 8,
- .htotal = 720 + 38 + 8 + 38,
+ .hsync_start = 720 + 48,
+ .hsync_end = 720 + 48 + 9,
+ .htotal = 720 + 48 + 9 + 48,
.vdisplay = 1280,
.vsync_start = 1280 + 12,
- .vsync_end = 1280 + 12 + 4,
- .vtotal = 1280 + 12 + 4 + 12,
+ .vsync_end = 1280 + 12 + 5,
+ .vtotal = 1280 + 12 + 5 + 12,
.flags = 0,
.width_mm = 68,
.height_mm = 122,
dsi->lanes = 2;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
static int rb070d30_panel_enable(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
- int ret;
- ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
- if (ret)
- return ret;
-
- return 0;
+ return mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
}
static int rb070d30_panel_disable(struct drm_panel *panel)
0x00, 0x00 }
};
-unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
+static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
0x18, 0x19, 0x1a, 0x1b, 0x1c,
0x1d, 0x1e, 0x1f, 0x20, 0x21
};
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
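+/* 1024x600 timings, ~60 Hz: 51.2 MHz / (1344 * 635) */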
+static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode = {
+ .clock = 51200,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 100,
+ .hsync_end = 1024 + 100 + 100,
+ .htotal = 1024 + 100 + 100 + 120,
+ .vdisplay = 600,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 10,
+ .vtotal = 600 + 10 + 10 + 15,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
+ .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode arm_rtsm_mode[] = {
{
.clock = 65000,
}, {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
+ }, {
+ .compatible = "yes-optoelectronics,ytc700tlag-05-201c",
+ .data = &yes_optoelectronics_ytc700tlag_05_201c,
}, {
/* Must be the last entry */
.compatible = "panel-dpi",
.height_mm = 130,
};
-struct st7703_panel_desc jh057n00900_panel_desc = {
+static const struct st7703_panel_desc jh057n00900_panel_desc = {
.mode = &jh057n00900_mode,
.lanes = 4,
.mode_flags = MIPI_DSI_MODE_VIDEO |
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tdo_tl070wsh30_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline
+struct tdo_tl070wsh30_panel *to_tdo_tl070wsh30_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct tdo_tl070wsh30_panel, base);
+}
+
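+/* Power-on sequence: enable the supply, pulse the reset line, exit sleep mode, then switch the display on. */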
+static int tdo_tl070wsh30_panel_prepare(struct drm_panel *panel)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+ int err;
+
+ if (tdo_tl070wsh30->prepared)
+ return 0;
+
+ err = regulator_enable(tdo_tl070wsh30->supply);
+ if (err < 0)
+ return err;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 1);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 0);
+
+ msleep(200);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ regulator_disable(tdo_tl070wsh30->supply);
+ return err;
+ }
+
+ msleep(200);
+
+ err = mipi_dsi_dcs_set_display_on(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ regulator_disable(tdo_tl070wsh30->supply);
+ return err;
+ }
+
+ msleep(20);
+
+ tdo_tl070wsh30->prepared = true;
+
+ return 0;
+}
+
+static int tdo_tl070wsh30_panel_unprepare(struct drm_panel *panel)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+ int err;
+
+ if (!tdo_tl070wsh30->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(tdo_tl070wsh30->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ usleep_range(10000, 11000);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10000, 11000);
+
+ regulator_disable(tdo_tl070wsh30->supply);
+
+ tdo_tl070wsh30->prepared = false;
+
+ return 0;
+}
+
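+/* 1024x600 video mode, 60 Hz: 47.25 MHz / (1250 * 630) */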
+static const struct drm_display_mode default_mode = {
+ .clock = 47250,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 46,
+ .hsync_end = 1024 + 46 + 80,
+ .htotal = 1024 + 46 + 80 + 100,
+ .vdisplay = 600,
+ .vsync_start = 600 + 5,
+ .vsync_end = 600 + 5 + 5,
+ .vtotal = 600 + 5 + 5 + 20,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static int tdo_tl070wsh30_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 154;
+ connector->display_info.height_mm = 85;
+ connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs tdo_tl070wsh30_panel_funcs = {
+ .unprepare = tdo_tl070wsh30_panel_unprepare,
+ .prepare = tdo_tl070wsh30_panel_prepare,
+ .get_modes = tdo_tl070wsh30_panel_get_modes,
+};
+
+static const struct of_device_id tdo_tl070wsh30_of_match[] = {
+ { .compatible = "tdo,tl070wsh30", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tdo_tl070wsh30_of_match);
+
+static int tdo_tl070wsh30_panel_add(struct tdo_tl070wsh30_panel *tdo_tl070wsh30)
+{
+ struct device *dev = &tdo_tl070wsh30->link->dev;
+ int err;
+
+ tdo_tl070wsh30->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(tdo_tl070wsh30->supply))
+ return PTR_ERR(tdo_tl070wsh30->supply);
+
+ tdo_tl070wsh30->reset_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tdo_tl070wsh30->reset_gpio)) {
+ err = PTR_ERR(tdo_tl070wsh30->reset_gpio);
+ dev_dbg(dev, "failed to get reset gpio: %d\n", err);
+ return err;
+ }
+
+ drm_panel_init(&tdo_tl070wsh30->base, &tdo_tl070wsh30->link->dev,
+ &tdo_tl070wsh30_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&tdo_tl070wsh30->base);
+ if (err)
+ return err;
+
+ drm_panel_add(&tdo_tl070wsh30->base);
+
+ return 0;
+}
+
+static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+
+ tdo_tl070wsh30 = devm_kzalloc(&dsi->dev, sizeof(*tdo_tl070wsh30),
+ GFP_KERNEL);
+ if (!tdo_tl070wsh30)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, tdo_tl070wsh30);
+ tdo_tl070wsh30->link = dsi;
+
+ err = tdo_tl070wsh30_panel_add(tdo_tl070wsh30);
+ if (err < 0)
+ return err;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&tdo_tl070wsh30->base);
+ drm_panel_disable(&tdo_tl070wsh30->base);
+ drm_panel_unprepare(&tdo_tl070wsh30->base);
+
+ return 0;
+}
+
+static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&tdo_tl070wsh30->base);
+ drm_panel_unprepare(&tdo_tl070wsh30->base);
+}
+
+static struct mipi_dsi_driver tdo_tl070wsh30_panel_driver = {
+ .driver = {
+ .name = "panel-tdo-tl070wsh30",
+ .of_match_table = tdo_tl070wsh30_of_match,
+ },
+ .probe = tdo_tl070wsh30_panel_probe,
+ .remove = tdo_tl070wsh30_panel_remove,
+ .shutdown = tdo_tl070wsh30_panel_shutdown,
+};
+module_mipi_dsi_driver(tdo_tl070wsh30_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("TDO TL070WSH30 panel driver");
+MODULE_LICENSE("GPL v2");
static int td028ttec1_enable(struct drm_panel *panel)
{
struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
- int ret;
- ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
- if (ret)
- return ret;
-
- return 0;
+ return jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
}
static int td028ttec1_disable(struct drm_panel *panel)
u32 flags)
{
struct dev_pm_opp *opp;
- int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- err = dev_pm_opp_set_rate(dev, *freq);
- if (err)
- return err;
-
- return 0;
+ return dev_pm_opp_set_rate(dev, *freq);
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
- int err;
-
pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
- err = reset_control_deassert(pfdev->rstc);
- if (err)
- return err;
-
- return 0;
+ return reset_control_deassert(pfdev->rstc);
}
static void panfrost_reset_fini(struct panfrost_device *pfdev)
/* pm_domains for devices with more than one. */
struct device *pm_domain_devs[MAX_PM_DOMAINS];
struct device_link *pm_domain_links[MAX_PM_DOMAINS];
+ bool coherent;
struct panfrost_features features;
const struct panfrost_compatible *comp;
if (!pfdev->comp)
return -ENODEV;
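+	/* Cache whether the GPU is DMA-coherent; used below for GEM object caching and coherent page-table walks. */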
+ pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
+
 	/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
+ struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
+ obj->base.map_cached = pfdev->coherent;
return &obj->base.base;
}
struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
-
+ bool stopped;
+ struct mutex lock;
u64 fence_context;
u64 emit_seqno;
};
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
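+/*
+ * Stop the scheduler for @queue unless it is already stopped, increasing the
+ * karma of @bad if one is given. Returns true if this call actually stopped it.
+ */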
+static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
+ struct drm_sched_job *bad)
+{
+ bool stopped = false;
+
+ mutex_lock(&queue->lock);
+ if (!queue->stopped) {
+ drm_sched_stop(&queue->sched, bad);
+ if (bad)
+ drm_sched_increase_karma(bad);
+ queue->stopped = true;
+ stopped = true;
+ }
+ mutex_unlock(&queue->lock);
+
+ return stopped;
+}
+
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
+ /* Scheduler is already stopped, nothing to do. */
+ if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
+ return;
+
if (!mutex_trylock(&pfdev->reset_lock))
return;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
- drm_sched_stop(sched, sched_job);
- if (js != i)
- /* Ensure any timeouts on other slots have finished */
+ /*
+ * If the queue is still active, make sure we wait for any
+ * pending timeouts.
+ */
+ if (!pfdev->js->queue[i].stopped)
cancel_delayed_work_sync(&sched->work_tdr);
- }
- drm_sched_increase_karma(sched_job);
+ /*
+ * If the scheduler was not already stopped, there's a tiny
+ * chance a timeout has expired just before we stopped it, and
+		 * drm_sched_stop() does not flush pending work. Let's flush
+ * them now so the timeout handler doesn't get called in the
+ * middle of a reset.
+ */
+ if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL))
+ cancel_delayed_work_sync(&sched->work_tdr);
+
+ /*
+ * Now that we cancelled the pending timeouts, we can safely
+ * reset the stopped state.
+ */
+ pfdev->js->queue[i].stopped = false;
+ }
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+ mutex_unlock(&pfdev->reset_lock);
+
/* restart scheduler after GPU is usable again */
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_start(&pfdev->js->queue[i].sched, true);
-
- mutex_unlock(&pfdev->reset_lock);
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
}
for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ mutex_init(&js->queue[j].lock);
+
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
job_write(pfdev, JOB_INT_MASK, 0);
- for (j = 0; j < NUM_JOB_SLOTS; j++)
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
drm_sched_fini(&js->queue[j].sched);
+ mutex_destroy(&js->queue[j].lock);
+ }
}
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
+ struct panfrost_device *pfdev = panfrost_priv->pfdev;
+ struct panfrost_job_slot *js = pfdev->js;
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++)
.pgsize_bitmap = SZ_4K | SZ_2M,
.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
.major = 1,
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = drm_gem_cma_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size,
- bo->pin_count, rel);
+ bo->tbo.pin_count, rel);
}
return 0;
}
};
static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "enable");
}
static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "disable");
}
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
struct qxl_surface surf;
- int ret;
if (!new_state->fb)
return 0;
}
}
- ret = qxl_bo_pin(user_bo);
- if (ret)
- return ret;
-
- return 0;
+ return qxl_bo_pin(user_bo);
}
static void qxl_plane_cleanup_fb(struct drm_plane *plane,
struct ttm_place placements[3];
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
- unsigned int pin_count;
void *kptr;
unsigned int map_count;
int type;
if (ret)
goto out;
- if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type, false);
+ if (!qobj->tbo.pin_count) {
+ qxl_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out;
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
u32 c = 0;
u32 pflag = 0;
unsigned int i;
- if (pinned)
- pflag |= TTM_PL_FLAG_NO_EVICT;
if (qbo->tbo.base.size <= PAGE_SIZE)
pflag |= TTM_PL_FLAG_TOPDOWN;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM) {
qbo->placements[c].mem_type = TTM_PL_VRAM;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_SURFACE) {
qbo->placements[c].mem_type = TTM_PL_PRIV;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
qbo->placements[c].mem_type = TTM_PL_VRAM;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_CPU) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (!c) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+ qbo->placements[c++].flags = 0;
}
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
+ struct ttm_operation_ctx ctx = { !kernel, false };
struct qxl_bo *bo;
enum ttm_bo_type type;
int r;
}
bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
- bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
INIT_LIST_HEAD(&bo->list);
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain, pinned);
+ qxl_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, size,
- NULL, NULL, &qxl_ttm_bo_destroy);
+ r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, &ctx, size,
+ NULL, NULL, &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->ddev.dev,
size, domain);
return r;
}
+ if (pinned)
+ ttm_bo_pin(&bo->tbo);
+ ttm_bo_unreserve(&bo->tbo);
*bo_ptr = bo;
return 0;
}
struct drm_device *ddev = bo->tbo.base.dev;
int r;
- if (bo->pin_count) {
- bo->pin_count++;
+ if (bo->tbo.pin_count) {
+ ttm_bo_pin(&bo->tbo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, bo->type, true);
+ qxl_ttm_placement_from_domain(bo, bo->type);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (likely(r == 0)) {
- bo->pin_count = 1;
- }
+ if (likely(r == 0))
+ ttm_bo_pin(&bo->tbo);
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
return r;
}
-static int __qxl_bo_unpin(struct qxl_bo *bo)
+static void __qxl_bo_unpin(struct qxl_bo *bo)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->tbo.base.dev;
- int r, i;
-
- if (!bo->pin_count) {
- dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r != 0))
- dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
- return r;
+ ttm_bo_unpin(&bo->tbo);
}
/*
if (r)
return r;
- r = __qxl_bo_unpin(bo);
+ __qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
- return r;
+ return 0;
}
void qxl_bo_force_delete(struct qxl_device *qdev)
int qxl_surf_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
+ return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
int qxl_vram_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
-static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->tbo.base.dev;
-
- dev_err(ddev->dev, "%p reserve failed for wait\n",
- bo);
- }
- return r;
- }
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
-
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
-
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
bool kernel, bool pinned, u32 domain,
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
#endif
struct ttm_operation_ctx ctx = { true, false };
int ret;
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type, false);
+ if (!bo->tbo.pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_drv.h"
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
return;
}
qbo = to_qxl_bo(bo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
*placement = qbo->placement;
}
case TTM_PL_VRAM:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
+ mem->bus.caching = ttm_cached;
break;
case TTM_PL_PRIV:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) +
qdev->surfaceram_base;
+ mem->bus.caching = ttm_cached;
break;
default:
return -EINVAL;
/*
* TTM backend functions.
*/
-struct qxl_ttm_tt {
- struct ttm_tt ttm;
- struct qxl_device *qdev;
- u64 offset;
-};
-
-static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
- struct ttm_resource *bo_mem)
-{
- struct qxl_ttm_tt *gtt = (void *)ttm;
-
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
- }
- /* Not implemented */
- return -1;
-}
-
-static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm)
-{
- /* Not implemented */
-}
-
static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
- struct qxl_ttm_tt *gtt = (void *)ttm;
-
ttm_tt_destroy_common(bdev, ttm);
-	ttm_tt_fini(&gtt->ttm);
- kfree(gtt);
+ ttm_tt_fini(ttm);
+ kfree(ttm);
}
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct qxl_device *qdev;
- struct qxl_ttm_tt *gtt;
+ struct ttm_tt *ttm;
- qdev = qxl_get_qdev(bo->bdev);
- gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
+ ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (ttm == NULL)
return NULL;
- gtt->qdev = qdev;
-	if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
+ if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
+ kfree(ttm);
return NULL;
}
-	return &gtt->ttm;
+ return ttm;
+}
+
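+/*
+ * Notify qxl that a BO is moving (or being destroyed) so any backing surface
+ * in surface RAM can be evicted first.
+ */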
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem)
+{
+ struct qxl_bo *qbo;
+ struct qxl_device *qdev;
+
+ if (!qxl_ttm_bo_is_qxl_bo(bo))
+ return;
+ qbo = to_qxl_bo(bo);
+ qdev = to_qxl(qbo->tbo.base.dev);
+
+ if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+ qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int ret;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ qxl_bo_move_notify(bo, evict, new_mem);
+
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
- return ret;
+ goto out;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, ctx, new_mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+out:
+ if (ret) {
+ swap(*new_mem, bo->mem);
+ qxl_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ }
+ return ret;
}
-static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- struct qxl_bo *qbo;
- struct qxl_device *qdev;
-
- if (!qxl_ttm_bo_is_qxl_bo(bo))
- return;
- qbo = to_qxl_bo(bo);
- qdev = to_qxl(qbo->tbo.base.dev);
-
- if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
- qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+ qxl_bo_move_notify(bo, false, NULL);
}
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .ttm_tt_bind = &qxl_ttm_backend_bind,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
- .ttm_tt_unbind = &qxl_ttm_backend_unbind,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
- .move_notify = &qxl_bo_move_notify,
+ .delete_mem_notify = &qxl_bo_delete_mem_notify,
};
static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&qdev->mman.bdev,
- &qxl_bo_driver,
+ r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
qdev->ddev.anon_inode->i_mapping,
qdev->ddev.vma_offset_manager,
- false);
+ false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u32 flags;
- unsigned pin_count;
void *kptr;
u32 tiling_flags;
u32 pitch;
/* unpin of the old buffer */
r = radeon_bo_reserve(work->old_rbo, false);
if (likely(r == 0)) {
- r = radeon_bo_unpin(work->old_rbo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ radeon_bo_unpin(work->old_rbo);
radeon_bo_unreserve(work->old_rbo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
- }
+ radeon_bo_unpin(new_rbo);
radeon_bo_unreserve(new_rbo);
cleanup:
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
-void radeon_gem_object_free(struct drm_gem_object *obj);
-int radeon_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
- int flags);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
unsigned int flags, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *,
struct sg_table *sg);
-int radeon_gem_prime_pin(struct drm_gem_object *obj);
-void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
-void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
}
ret = drm_ioctl(filp, cmd, arg);
-
+
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_free_object_unlocked = radeon_gem_object_free,
- .gem_open_object = radeon_gem_object_open,
- .gem_close_object = radeon_gem_object_close,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_pin = radeon_gem_prime_pin,
- .gem_prime_unpin = radeon_gem_prime_unpin,
- .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
- .gem_prime_vmap = radeon_gem_prime_vmap,
- .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#include "radeon.h"
-void radeon_gem_object_free(struct drm_gem_object *gobj)
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
+ int flags);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+static const struct drm_gem_object_funcs radeon_gem_object_funcs;
+
+static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
return r;
}
*obj = &robj->tbo.base;
+ (*obj)->funcs = &radeon_gem_object_funcs;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct radeon_bo *rbo = gem_to_radeon_bo(obj);
struct radeon_device *rdev = rbo->rdev;
return 0;
}
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct radeon_bo *rbo = gem_to_radeon_bo(obj);
struct radeon_device *rdev = rbo->rdev;
return r;
}
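+/* Per-object GEM callbacks; these replace the driver-wide hooks removed from the drm_driver. */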
+static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+ .free = radeon_gem_object_free,
+ .open = radeon_gem_object_open,
+ .close = radeon_gem_object_close,
+ .export = radeon_gem_prime_export,
+ .pin = radeon_gem_prime_pin,
+ .unpin = radeon_gem_prime_unpin,
+ .get_sg_table = radeon_gem_prime_get_sg_table,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
+};
+
/*
* GEM ioctls.
*/
rbo->placements[c].fpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
rbo->placements[c].mem_type = TTM_PL_VRAM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
+ rbo->placements[c++].flags = 0;
}
rbo->placements[c].fpfn = 0;
rbo->placements[c].mem_type = TTM_PL_VRAM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
+ rbo->placements[c++].flags = 0;
}
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- (rbo->rdev->flags & RADEON_IS_AGP)) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- } else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
- }
+ rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_TT;
+ rbo->placements[c++].flags = 0;
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- } else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
- }
+ rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ rbo->placements[c++].flags = 0;
}
if (!c) {
rbo->placements[c].fpfn = 0;
rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+ rbo->placements[c++].flags = 0;
}
rbo->placement.num_placement = c;
if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
return -EPERM;
- if (bo->pin_count) {
- bo->pin_count++;
+ if (bo->tbo.pin_count) {
+ ttm_bo_pin(&bo->tbo);
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
- bo->pin_count = 1;
+ ttm_bo_pin(&bo->tbo);
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
if (domain == RADEON_GEM_DOMAIN_VRAM)
return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
{
- struct ttm_operation_ctx ctx = { false, false };
- int r, i;
-
- if (!bo->pin_count) {
- dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (likely(r == 0)) {
+ ttm_bo_unpin(&bo->tbo);
+ if (!bo->tbo.pin_count) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
- } else {
- dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
}
- return r;
}
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
+ struct ttm_bo_device *bdev = &rdev->mman.bdev;
+ struct ttm_resource_manager *man;
+
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
if (rdev->flags & RADEON_IS_IGP) {
return 0;
}
#endif
- return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ man = ttm_manager_type(bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(bdev, man);
}
void radeon_bo_force_delete(struct radeon_device *rdev)
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
- if (!bo->pin_count) {
+ if (!bo->tbo.pin_count) {
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
break;
old_object = reg->bo;
- if (old_object->pin_count == 0)
+ if (old_object->tbo.pin_count == 0)
steal = i;
}
radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct radeon_device *rdev;
return 0;
/* Can't move a pinned BO to visible VRAM */
- if (rbo->pin_count > 0)
- return -EINVAL;
+ if (rbo->tbo.pin_count > 0)
+ return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &rbo->placement, &ctx);
- } else if (unlikely(r != 0)) {
- return r;
+ r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+ } else if (likely(!r)) {
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return VM_FAULT_SIGBUS;
}
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should never happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
+ if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(r))
+ return VM_FAULT_SIGBUS;
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
-
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
-
/**
* radeon_bo_fence - add fence to buffer object
*
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
-extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
- bool no_wait);
-
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
u64 max_offset, u64 *gpu_addr);
-extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern void radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
-extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared);
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
+static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct radeon_bo *rbo;
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict,
struct ttm_resource *new_mem,
struct ttm_resource *old_mem)
{
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_place placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+ placements.flags = 0;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r)) {
goto out_cleanup;
}
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, &ctx, new_mem);
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_placement placement;
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+ placements.flags = 0;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
- if (unlikely(r)) {
+
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
goto out_cleanup;
- }
- r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+
+ r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ ttm_bo_assign_mem(bo, &tmp_mem);
+ r = radeon_move_blit(bo, true, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
struct ttm_resource *old_mem = &bo->mem;
int r;
- r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ if (new_mem->mem_type == TTM_PL_TT) {
+ r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+ radeon_bo_move_notify(bo, evict, new_mem);
+
+ r = ttm_bo_wait_ctx(bo, ctx);
if (r)
- return r;
+ goto fail;
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
- if (WARN_ON_ONCE(rbo->pin_count > 0))
+ if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
return -EINVAL;
rdev = radeon_get_rdev(bo->bdev);
ttm_bo_move_null(bo, new_mem);
return 0;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
+
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
rdev->asic->copy.copy == NULL) {
/* use memcpy */
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
- r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = radeon_move_vram_ram(bo, evict, ctx, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
- r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = radeon_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+ r = radeon_move_blit(bo, evict,
new_mem, old_mem);
}
memcpy:
r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
- return r;
+ goto fail;
}
}
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ radeon_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return r;
}
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
mem->bus.offset = (mem->start << PAGE_SHIFT) +
rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ mem->bus.caching = ttm_write_combined;
}
#endif
break;
return -EINVAL;
mem->bus.offset += rdev->mc.aper_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
/*
* Alpha: use bus.addr to hold the ioremap() return,
* so we can modify bus.base below.
*/
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.offset, bus_size);
- else
- mem->bus.addr =
- ioremap(mem->bus.offset, bus_size);
+ mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
if (!mem->bus.addr)
return -ENOMEM;
* TTM backend functions.
*/
struct radeon_ttm_tt {
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
u64 offset;
uint64_t userptr;
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= RADEON_GART_PAGE_SNOOP;
r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
radeon_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
- ttm_dma_tt_fini(&gtt->ttm);
+ ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt;
+ enum ttm_caching caching;
+ struct radeon_bo *rbo;
+
+ rbo = container_of(bo, struct radeon_bo, tbo);
rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (gtt == NULL) {
return NULL;
}
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+
+ if (rbo->flags & RADEON_GEM_GTT_UC)
+ caching = ttm_uncached;
+ else if (rbo->flags & RADEON_GEM_GTT_WC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
- return &gtt->ttm.ttm;
+ return &gtt->ttm;
}
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
if (!ttm)
return NULL;
- return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+ return container_of(ttm, struct radeon_ttm_tt, ttm);
}
static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm_tt_set_populated(ttm);
return 0;
}
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
-#if IS_ENABLED(CONFIG_AGP)
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_pool_populate(ttm, ctx);
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
- }
-#endif
-
- return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
+ return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
if (slave)
return;
-#if IS_ENABLED(CONFIG_AGP)
- if (rdev->flags & RADEON_IS_AGP) {
- ttm_pool_unpopulate(ttm);
- return;
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
- return;
- }
-#endif
-
- ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
+ return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}
+static void
+radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ radeon_bo_move_notify(bo, false, NULL);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .ttm_tt_bind = &radeon_ttm_tt_bind,
- .ttm_tt_unbind = &radeon_ttm_tt_unbind,
.ttm_tt_destroy = &radeon_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
- .move_notify = &radeon_bo_move_notify,
- .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .delete_mem_notify = &radeon_bo_delete_mem_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};
int r;
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&rdev->mman.bdev,
- &radeon_bo_driver,
+ r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
rdev->ddev->anon_inode->i_mapping,
rdev->ddev->vma_offset_manager,
+ rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
}
rdev->mman.initialized = true;
+ ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
+ dma_addressing_limited(&rdev->pdev->dev));
+
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
- struct ttm_buffer_object *bo;
- struct radeon_device *rdev;
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
vm_fault_t ret;
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
-
- rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- ret = ttm_bo_vm_fault(vmf);
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ goto unlock_mclk;
+
+ ret = radeon_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock_resv;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ goto unlock_mclk;
+
+unlock_resv:
+ dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
up_read(&rdev->pm.mclk_lock);
return ret;
}
return 0;
}
+static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
+}
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
static struct drm_info_list radeon_ttm_debugfs_list[] = {
{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+ {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
};
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
count = ARRAY_SIZE(radeon_ttm_debugfs_list);
-#ifdef CONFIG_SWIOTLB
- if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
- --count;
-#endif
-
return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
}
static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
}
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
struct rcar_du_device *rcdu = rcrtc->dev;
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
- .gem_prime_vmap = rockchip_gem_prime_vmap,
- .gem_prime_vunmap = rockchip_gem_prime_vunmap,
.gem_prime_mmap = rockchip_gem_mmap_buf,
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
#include <drm/drm.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
kfree(rk_obj);
}
+static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
+ .free = rockchip_gem_free_object,
+ .get_sg_table = rockchip_gem_prime_get_sg_table,
+ .vmap = rockchip_gem_prime_vmap,
+ .vunmap = rockchip_gem_prime_vunmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
obj = &rk_obj->base;
+ obj->funcs = &rockchip_gem_object_funcs;
+
drm_gem_object_init(drm, obj, size);
return rk_obj;
}
/*
- * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+ * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
* callback function
*/
void rockchip_gem_free_object(struct drm_gem_object *obj)
}
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vop *vop = to_vop(crtc);
}
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct vop *vop = to_vop(crtc);
const struct vop_data *vop_data = vop->data;
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
init_completion(&entity->entity_idle);
+ /* We start in an idle state. */
+ complete(&entity->entity_idle);
+
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
#include "sti_vtg.h"
static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
}
static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
}
static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
}
static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
}
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
}
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
}
static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
int err;
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
/* apply PLL and pixel clock changes */
- tegra_dc_commit_state(dc, state);
+ tegra_dc_commit_state(dc, crtc_state);
/* program display mode */
tegra_dc_set_timings(dc, mode);
.debugfs_init = tegra_debugfs_init,
#endif
- .gem_free_object_unlocked = tegra_bo_free_object,
- .gem_vm_ops = &tegra_bo_vm_ops,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = tegra_gem_prime_export,
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct dma_buf_map map;
+ int ret;
- if (obj->vaddr)
+ if (obj->vaddr) {
return obj->vaddr;
- else if (obj->gem.import_attach)
- return dma_buf_vmap(obj->gem.import_attach->dmabuf);
- else
+ } else if (obj->gem.import_attach) {
+ ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+ return ret ? NULL : map.vaddr;
+ } else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
+ }
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
if (obj->vaddr)
return;
else if (obj->gem.import_attach)
- dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
else
vunmap(addr);
}
return 0;
}
+static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
+ .free = tegra_bo_free_object,
+ .export = tegra_gem_prime_export,
+ .vm_ops = &tegra_bo_vm_ops,
+};
+
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
size_t size)
{
if (!bo)
return ERR_PTR(-ENOMEM);
+ bo->gem.funcs = &tegra_gem_object_funcs;
+
host1x_bo_init(&bo->base, &tegra_bo_ops);
size = round_up(size, PAGE_SIZE);
return __tegra_gem_mmap(gem, vma);
}
-static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
- return bo->vaddr;
+ dma_buf_map_set_vaddr(map, bo->vaddr);
+
+ return 0;
}
-static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}
}
static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
}
static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
}
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
.atomic_disable = tidss_plane_atomic_disable,
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA);
- tilcdc_set(dev, LCDC_DMA_CTRL_REG,
- LCDC_V1_END_OF_FRAME_INT_ENA);
} else {
tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
LCDC_V2_UNDERFLOW_INT_ENA |
- LCDC_V2_END_OF_FRAME0_INT_ENA |
LCDC_FRAME_DONE | LCDC_SYNC_LOST);
}
}
}
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
tilcdc_crtc_enable(crtc);
}
}
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
tilcdc_crtc_disable(crtc);
}
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0);
+
+ if (priv->rev == 1)
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ else
+ tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
return 0;
}
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ else
+ tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
}
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
.disable_vblank = tilcdc_crtc_disable_vblank,
};
-int tilcdc_crtc_max_width(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
- int max_width = 0;
-
- if (priv->rev == 1)
- max_width = 1024;
- else if (priv->rev == 2)
- max_width = 2048;
-
- return max_width;
-}
-
static enum drm_mode_status
tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
* check to see if the width is within the range that
* the LCD Controller physically supports
*/
- if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+ if (mode->hdisplay > priv->max_width)
return MODE_VIRTUAL_X;
/* width must be multiple of 16 */
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+ dev->mode_config.max_width = priv->max_width;
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
}
goto init_failed;
}
- if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
- priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
-
- DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
-
- if (of_property_read_u32(node, "max-width", &priv->max_width))
- priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
-
- DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
-
- if (of_property_read_u32(node, "max-pixelclock",
- &priv->max_pixelclock))
- priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
-
- DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
-
pm_runtime_enable(dev);
/* Determine LCD IP Version */
}
}
+ if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+ priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
+
+ DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
+
+ if (of_property_read_u32(node, "max-width", &priv->max_width)) {
+ if (priv->rev == 1)
+ priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V1;
+ else
+ priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V2;
+ }
+
+ DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
+
+ if (of_property_read_u32(node, "max-pixelclock",
+ &priv->max_pixelclock))
+ priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
+
+ DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
+
ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
dev_err(dev, "failed to create crtc\n");
}
static struct drm_info_list tilcdc_debugfs_list[] = {
- { "regs", tilcdc_regs_show, 0 },
- { "mm", tilcdc_mm_show, 0 },
+ { "regs", tilcdc_regs_show, 0, NULL },
+ { "mm", tilcdc_mm_show, 0, NULL },
};
static void tilcdc_debugfs_init(struct drm_minor *minor)
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
-/* Defaulting to max width as defined on AM335x */
-#define TILCDC_DEFAULT_MAX_WIDTH 2048
+/* Maximum display width for LCDC V1 */
+#define TILCDC_DEFAULT_MAX_WIDTH_V1 1024
+/* ... and for LCDC V2 found on AM335x: */
+#define TILCDC_DEFAULT_MAX_WIDTH_V2 2048
/*
* This may need some tweaking, but want to allow at least 1280x1024@60
* with optimized DDR & EMIF settings tweaked 1920x1080@24 appears to
const struct tilcdc_panel_info *info);
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
-int tilcdc_crtc_max_width(struct drm_crtc *crtc);
void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
- ttm_resource.o
+ ttm_execbuf_util.o ttm_range_manager.o \
+ ttm_resource.o ttm_pool.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
-ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
- int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret, cached = ttm->caching == ttm_cached;
unsigned i;
if (agp_be->mem)
agp_be->mem = NULL;
agp_be->bridge = bridge;
- if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) {
kfree(agp_be);
return NULL;
}
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
- if (!list_empty(&bo->lru))
- return;
-
- if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+ if (!list_empty(&bo->lru) || bo->pin_count)
return;
man = ttm_manager_type(bdev, mem->mem_type);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
- if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_TT:
ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
if (ret)
goto out_err;
- ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
- if (ret)
- goto out_err;
-
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_populate(bdev, bo->ttm, ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
goto out_err;
-
- ret = ttm_bo_tt_bind(bo, mem);
- if (ret)
- goto out_err;
- }
-
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, evict, mem);
- bo->mem = *mem;
- goto moved;
}
}
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, evict, mem);
-
- if (old_man->use_tt && new_man->use_tt)
- ret = ttm_bo_move_ttm(bo, ctx, mem);
- else if (bdev->driver->move)
- ret = bdev->driver->move(bo, evict, ctx, mem);
- else
- ret = ttm_bo_move_memcpy(bo, ctx, mem);
-
- if (ret) {
- if (bdev->driver->move_notify) {
- swap(*mem, bo->mem);
- bdev->driver->move_notify(bo, false, mem);
- swap(*mem, bo->mem);
- }
-
+ ret = bdev->driver->move(bo, evict, ctx, mem);
+ if (ret)
goto out_err;
- }
-moved:
ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
return 0;
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- if (bo->bdev->driver->move_notify)
- bo->bdev->driver->move_notify(bo, false, NULL);
+ if (bo->bdev->driver->delete_mem_notify)
+ bo->bdev->driver->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
ttm_resource_free(bo, &bo->mem);
spin_lock(&ttm_bo_glob.lru_lock);
/*
- * Make NO_EVICT bos immediately available to
+ * Make pinned bos immediately available to
* shrinkers, now that they are queued for
* destruction.
*/
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ if (bo->pin_count) {
+ bo->pin_count = 0;
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
-static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
- uint32_t cur_placement,
- uint32_t proposed_placement)
-{
- uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
- uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
-
- /**
- * Keep current caching if possible.
- */
-
- if ((cur_placement & caching) != 0)
- result |= (cur_placement & caching);
- else if ((TTM_PL_FLAG_CACHED & caching) != 0)
- result |= TTM_PL_FLAG_CACHED;
- else if ((TTM_PL_FLAG_WC & caching) != 0)
- result |= TTM_PL_FLAG_WC;
- else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
- result |= TTM_PL_FLAG_UNCACHED;
-
- return result;
-}
-
/**
* ttm_bo_mem_placement - check if placement is compatible
* @bo: BO to find memory for
* @place: where to search
* @mem: the memory object to fill in
- * @ctx: operation context
*
* Check if placement is compatible and fill in mem structure.
* Returns -EBUSY if placement won't work or negative error code.
*/
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem,
- struct ttm_operation_ctx *ctx)
+ struct ttm_resource *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
- uint32_t cur_flags = 0;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
return -EBUSY;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- place->flags);
- cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
-
mem->mem_type = place->mem_type;
- mem->placement = cur_flags;
+ mem->placement = place->flags;
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_del_from_lru(bo);
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ ret = ttm_bo_mem_placement(bo, place, mem);
if (ret)
continue;
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
- ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ ret = ttm_bo_mem_placement(bo, place, mem);
if (ret)
continue;
continue;
*new_flags = heap->flags;
- if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (mem->mem_type == heap->mem_type) &&
+ if ((mem->mem_type == heap->mem_type) &&
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
- } else {
- bo->mem.placement &= TTM_PL_MASK_CACHING;
- bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
}
/*
* We might need to add a TTM.
bo->mem.bus.offset = 0;
bo->mem.bus.addr = NULL;
bo->moving = NULL;
- bo->mem.placement = TTM_PL_FLAG_CACHED;
+ bo->mem.placement = 0;
bo->acc_size = acc_size;
+ bo->pin_count = 0;
bo->sg = sg;
if (resv) {
bo->base.resv = resv;
}
EXPORT_SYMBOL(ttm_bo_init);
-static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += ttm_round_pot(npages * sizeof(void *));
- size += ttm_round_pot(sizeof(struct ttm_tt));
- return size;
-}
-
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size)
size += ttm_round_pot(struct_size);
size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
- size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-int ttm_bo_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct ttm_buffer_object **p_bo)
-{
- struct ttm_buffer_object *bo;
- size_t acc_size;
- int ret;
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (unlikely(bo == NULL))
- return -ENOMEM;
-
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- interruptible, acc_size,
- NULL, NULL, NULL);
- if (likely(ret == 0))
- *p_bo = bo;
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_create);
-
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
- struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
-
- if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory manager memory type %u\n", mem_type);
- return -EINVAL;
- }
-
- if (!man) {
- pr_err("Memory type %u has not been initialized\n", mem_type);
- return 0;
- }
-
- return ttm_resource_manager_force_list_clean(bdev, man);
-}
-EXPORT_SYMBOL(ttm_bo_evict_mm);
-
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
+ ttm_pool_fini(&bdev->pool);
+
if (!ret)
ttm_bo_global_release();
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
+ struct device *dev,
struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool need_dma32)
+ bool use_dma_alloc, bool use_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
bdev->driver = driver;
ttm_bo_init_sysman(bdev);
+ ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&ttm_global_mutex);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct ttm_buffer_object *bo;
int ret = -EBUSY;
bool locked;
* Move to system cached
*/
- if (bo->mem.mem_type != TTM_PL_SYSTEM ||
- bo->ttm->caching_state != tt_cached) {
+ if (bo->mem.mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource evict_mem;
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.placement = TTM_PL_FLAG_CACHED;
+ evict_mem.placement = 0;
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
if (bo->bdev->driver->swap_notify)
bo->bdev->driver->swap_notify(bo);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm);
out:
/**
}
EXPORT_SYMBOL(ttm_bo_swapout);
-void ttm_bo_swapout_all(void)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
-
- while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_swapout_all);
-
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
if (bo->ttm == NULL)
bo->ttm = NULL;
}
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
-{
- return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
-}
-
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
-{
- bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
-}
struct ttm_buffer_object *bo;
};
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
- ttm_resource_free(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = &bo->mem;
- int ret;
-
- if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- pr_err("Failed to expire sync object before unbinding TTM\n");
- return ret;
- }
-
- ttm_bo_tt_unbind(bo);
- ttm_bo_free_old_node(bo);
- old_mem->mem_type = TTM_PL_SYSTEM;
- }
-
- ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
- if (unlikely(ret != 0))
- return ret;
-
- if (new_mem->mem_type != TTM_PL_SYSTEM) {
-
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_tt_bind(bo, new_mem);
- if (unlikely(ret != 0))
- return ret;
- }
-
- ttm_bo_assign_mem(bo, new_mem);
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem)
{
} else {
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
addr = ioremap_wc(mem->bus.offset, bus_size);
else
addr = ioremap(mem->bus.offset, bus_size);
void *new_iomap;
int ret;
unsigned long i;
- unsigned long page;
- unsigned long add = 0;
- int dir;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
goto out1;
}
- add = 0;
- dir = 1;
-
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->start < old_mem->start + old_mem->size)) {
- dir = -1;
- add = new_mem->num_pages - 1;
- }
-
for (i = 0; i < new_mem->num_pages; ++i) {
- page = i * dir + add;
if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(old_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
prot);
} else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(new_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
prot);
} else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ ret = ttm_copy_io_page(new_iomap, old_iomap, i);
}
if (ret)
goto out1;
return -ENOMEM;
fbo->base = *bo;
- fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
+ fbo->base.pin_count = 1;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp)
{
+ struct ttm_resource_manager *man;
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+ caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+
/* Cached mappings need no adjustment */
- if (caching_flags & TTM_PL_FLAG_CACHED)
+ if (caching == ttm_cached)
return tmp;
#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
size);
else
if (ret)
return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && ttm->caching == ttm_cached) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
return 0;
}
}
spin_unlock(&from->move_lock);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
return VM_FAULT_NOPAGE;
}
+ /*
+ * Refuse to fault imported pages. This should be handled
+ * (if at all) by redirecting mmap to the exporter.
+ */
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
- /*
- * Refuse to fault imported pages. This should be handled
- * (if at all) by redirecting mmap to the exporter.
- */
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- return VM_FAULT_SIGBUS;
-
- if (bdev->driver->fault_reserve_notify) {
- struct dma_fence *moving = dma_fence_get(bo->moving);
-
- err = bdev->driver->fault_reserve_notify(bo);
- switch (err) {
- case 0:
- break;
- case -EBUSY:
- case -ERESTARTSYS:
- dma_fence_put(moving);
- return VM_FAULT_NOPAGE;
- default:
- dma_fence_put(moving);
- return VM_FAULT_SIGBUS;
- }
-
- if (bo->moving != moving) {
- ttm_bo_move_to_lru_tail_unlocked(bo);
- }
- dma_fence_put(moving);
- }
-
/*
* Wait for buffer data in transit, due to a pipelined
* move.
if (unlikely(page_offset >= bo->num_pages))
return VM_FAULT_SIGBUS;
- prot = ttm_io_prot(bo->mem.placement, prot);
+ prot = ttm_io_prot(bo, &bo->mem, prot);
if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <drm/ttm/ttm_pool.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
+ ret = ttm_bo_swapout(ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
- ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
- ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
unsigned int i;
/* let the page allocator first stop the shrink work. */
- ttm_page_alloc_fini();
- ttm_dma_page_alloc_fini();
+ ttm_pool_mgr_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
+++ /dev/null
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- * Pauli Nieminen <suokkos@gmail.com>
- */
-
-/* simple list based uncached page pool
- * - Pool collects resently freed pages for reuse
- * - Use page->lru to keep a free list
- * - doesn't track currently in use pages
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/atomic.h>
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 16
-#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define PAGE_FREE_INTERVAL 1000
-
-/**
- * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
- *
- * @lock: Protects the shared pool from concurrnet access. Must be used with
- * irqsave/irqrestore variants because pool allocator maybe called from
- * delayed work.
- * @fill_lock: Prevent concurrent calls to fill.
- * @list: Pool of free uc/wc pages for fast reuse.
- * @gfp_flags: Flags to pass for alloc_page.
- * @npages: Number of pages in pool.
- */
-struct ttm_page_pool {
- spinlock_t lock;
- bool fill_lock;
- struct list_head list;
- gfp_t gfp_flags;
- unsigned npages;
- char *name;
- unsigned long nfrees;
- unsigned long nrefills;
- unsigned int order;
-};
-
-/**
- * Limits for the pool. They are handled without locks because only place where
- * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-#define NUM_POOLS 6
-
-/**
- * struct ttm_pool_manager - Holds memory pools for fst allocation
- *
- * Manager is read only object for pool code so it doesn't need locking.
- *
- * @free_interval: minimum number of jiffies between freeing pages from pool.
- * @page_alloc_inited: reference counting for pool allocation.
- * @work: Work that is used to shrink the pool. Work is only run when there is
- * some pages to free.
- * @small_allocation: Limit in number of pages what is small allocation.
- *
- * @pools: All pool objects in use.
- **/
-struct ttm_pool_manager {
- struct kobject kobj;
- struct shrinker mm_shrink;
- struct ttm_pool_opts options;
-
- union {
- struct ttm_page_pool pools[NUM_POOLS];
- struct {
- struct ttm_page_pool wc_pool;
- struct ttm_page_pool uc_pool;
- struct ttm_page_pool wc_pool_dma32;
- struct ttm_page_pool uc_pool_dma32;
- struct ttm_page_pool wc_pool_huge;
- struct ttm_page_pool uc_pool_huge;
- } ;
- };
-};
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max)
- m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
- m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-static struct ttm_pool_manager *_manager;
-
-/**
- * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
- enum ttm_caching_state cstate)
-{
- int pool_index;
-
- if (cstate == tt_cached)
- return NULL;
-
- if (cstate == tt_wc)
- pool_index = 0x0;
- else
- pool_index = 0x1;
-
- if (flags & TTM_PAGE_FLAG_DMA32) {
- if (huge)
- return NULL;
- pool_index |= 0x2;
-
- } else if (huge) {
- pool_index |= 0x4;
- }
-
- return &_manager->pools[pool_index];
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages,
- unsigned int order)
-{
- unsigned int i, pages_nr = (1 << order);
-
- if (order == 0) {
- if (ttm_set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
- }
-
- for (i = 0; i < npages; ++i) {
- if (order > 0) {
- if (ttm_set_pages_wb(pages[i], pages_nr))
- pr_err("Failed to set %d pages to wb!\n", pages_nr);
- }
- __free_pages(pages[i], order);
- }
-}
-
-static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
- unsigned freed_pages)
-{
- pool->npages -= freed_pages;
- pool->nfrees += freed_pages;
-}
-
-/**
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: to free the pages from
- * @free_all: If set to true will free all pages in pool
- * @use_static: Safe to use static buffer
- **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
- bool use_static)
-{
- static struct page *static_buf[NUM_PAGES_TO_ALLOC];
- unsigned long irq_flags;
- struct page *p;
- struct page **pages_to_free;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- if (use_static)
- pages_to_free = static_buf;
- else
- pages_to_free = kmalloc_array(npages_to_free,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!pages_to_free) {
- pr_debug("Failed to allocate memory for pool free operation\n");
- return 0;
- }
-
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- list_for_each_entry_reverse(p, &pool->list, lru) {
- if (freed_pages >= npages_to_free)
- break;
-
- pages_to_free[freed_pages++] = p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
- /* remove range of pages from the pool */
- __list_del(p->lru.prev, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_pages_put(pages_to_free, freed_pages, pool->order);
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
- /* We cannot fall through or break here because the code
- * after the loop expects the pool lock to still be held,
- * and we have already dropped it.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- __list_del(&p->lru, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_pages_put(pages_to_free, freed_pages, pool->order);
-out:
- if (pages_to_free != static_buf)
- kfree(pages_to_free);
- return nr_free;
-}
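
The free path above follows a common pattern: detach at most NUM_PAGES_TO_ALLOC entries while holding the spinlock, drop the lock for the expensive caching change and page free, then loop. A minimal user-space sketch of that pattern, with a pthread mutex standing in for the pool spinlock and hypothetical names throughout, might look like this:

```c
/*
 * Minimal sketch of batched draining under a lock: detach up to BATCH
 * entries with the lock held, release the lock for the expensive part,
 * repeat until the pool is empty. Illustrative only.
 */
#include <stdio.h>
#include <pthread.h>

#define BATCH 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pool[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
static int npages = 10;

static void expensive_free(const int *batch, int n)
{
	/* stands in for ttm_pages_put(): caching change + __free_pages() */
	printf("freeing %d entries starting at %d\n", n, batch[0]);
}

int main(void)
{
	int batch[BATCH];

	for (;;) {
		int n = 0;

		pthread_mutex_lock(&lock);
		while (npages && n < BATCH)
			batch[n++] = pool[--npages];
		pthread_mutex_unlock(&lock);

		if (!n)
			break;
		/* the costly work runs without the pool lock held */
		expensive_free(batch, n);
	}
	return 0;
}
```
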
-
-/**
- * Callback for mm to request the pool to reduce the number of pages held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * This code is crying out for a shrinker per pool....
- */
-static unsigned long
-ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- static DEFINE_MUTEX(lock);
- static unsigned start_pool;
- unsigned i;
- unsigned pool_offset;
- struct ttm_page_pool *pool;
- int shrink_pages = sc->nr_to_scan;
- unsigned long freed = 0;
- unsigned int nr_free_pool;
-
- if (!mutex_trylock(&lock))
- return SHRINK_STOP;
- pool_offset = ++start_pool % NUM_POOLS;
- /* select start pool in round robin fashion */
- for (i = 0; i < NUM_POOLS; ++i) {
- unsigned nr_free = shrink_pages;
- unsigned page_nr;
-
- if (shrink_pages == 0)
- break;
-
- pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- page_nr = (1 << pool->order);
- /* OK to use static buffer since global mutex is held. */
- nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
- shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
- freed += (nr_free_pool - shrink_pages) << pool->order;
- if (freed >= sc->nr_to_scan)
- break;
- shrink_pages <<= pool->order;
- }
- mutex_unlock(&lock);
- return freed;
-}
-
-
-static unsigned long
-ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- unsigned i;
- unsigned long count = 0;
- struct ttm_page_pool *pool;
-
- for (i = 0; i < NUM_POOLS; ++i) {
- pool = &_manager->pools[i];
- count += (pool->npages << pool->order);
- }
-
- return count;
-}
-
-static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.count_objects = ttm_pool_shrink_count;
- manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
- manager->mm_shrink.seeks = 1;
- return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-static int ttm_set_pages_caching(struct page **pages,
- enum ttm_caching_state cstate, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- switch (cstate) {
- case tt_uncached:
- r = ttm_set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to uc!\n", cpages);
- break;
- case tt_wc:
- r = ttm_set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to wc!\n", cpages);
- break;
- default:
- break;
- }
- return r;
-}
-
-/**
- * Free the pages that failed to change the caching state. If there are
- * any pages that have already changed their caching state, put them back
- * into the pool.
- */
-static void ttm_handle_caching_state_failure(struct list_head *pages,
- int ttm_flags, enum ttm_caching_state cstate,
- struct page **failed_pages, unsigned cpages)
-{
- unsigned i;
- /* Failed pages have to be freed */
- for (i = 0; i < cpages; ++i) {
- list_del(&failed_pages[i]->lru);
- __free_page(failed_pages[i]);
- }
-}
-
-/**
- * Allocate new pages with correct caching.
- *
- * This function is reentrant if caller updates count depending on number of
- * pages returned in pages array.
- */
-static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate,
- unsigned count, unsigned order)
-{
- struct page **caching_array;
- struct page *p;
- int r = 0;
- unsigned i, j, cpages;
- unsigned npages = 1 << order;
- unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
-
- /* allocate array for page caching change */
- caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
- GFP_KERNEL);
-
- if (!caching_array) {
- pr_debug("Unable to allocate table for new pages\n");
- return -ENOMEM;
- }
-
- for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_pages(gfp_flags, order);
-
- if (!p) {
- pr_debug("Unable to get page %u\n", i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- }
- r = -ENOMEM;
- goto out;
- }
-
- list_add(&p->lru, pages);
-
-#ifdef CONFIG_HIGHMEM
- /* gfp flags of highmem pages should never be dma32, so we
- * should be fine in such a case
- */
- if (PageHighMem(p))
- continue;
-
-#endif
- for (j = 0; j < npages; ++j) {
- caching_array[cpages++] = p++;
- if (cpages == max_cpages) {
-
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r) {
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(caching_array, cstate, cpages);
- if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
-
- return r;
-}
-
-/**
- * Fill the given pool if there aren't enough pages and the requested number of
- * pages is small.
- */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count, unsigned long *irq_flags)
-{
- struct page *p;
- int r;
- unsigned cpages = 0;
- /**
- * Only allow one pool fill operation at a time.
- * If pool doesn't have enough pages for the allocation new pages are
- * allocated from outside of pool.
- */
- if (pool->fill_lock)
- return;
-
- pool->fill_lock = true;
-
- /* If the allocation request is small and there are not enough
- * pages in the pool, fill the pool up first. */
- if (count < _manager->options.small
- && count > pool->npages) {
- struct list_head new_pages;
- unsigned alloc_size = _manager->options.alloc_size;
-
- /**
- * Can't change page caching if in irqsave context. We have to
- * drop the pool->lock.
- */
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
- INIT_LIST_HEAD(&new_pages);
- r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size, 0);
- spin_lock_irqsave(&pool->lock, *irq_flags);
-
- if (!r) {
- list_splice(&new_pages, &pool->list);
- ++pool->nrefills;
- pool->npages += alloc_size;
- } else {
- pr_debug("Failed to fill pool (%p)\n", pool);
- /* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &new_pages, lru) {
- ++cpages;
- }
- list_splice(&new_pages, &pool->list);
- pool->npages += cpages;
- }
-
- }
- pool->fill_lock = false;
-}
-
-/**
- * Allocate pages from the pool and put them on the return list.
- *
- * @return zero for success or negative error code.
- */
-static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count, unsigned order)
-{
- unsigned long irq_flags;
- struct list_head *p;
- unsigned i;
- int r = 0;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- if (!order)
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
- &irq_flags);
-
- if (count >= pool->npages) {
- /* take all pages from the pool */
- list_splice_init(&pool->list, pages);
- count -= pool->npages;
- pool->npages = 0;
- goto out;
- }
- /* Find the cut point for the requested number of pages. Walk the list
- * from whichever end is closer to halve the search space. */
- if (count <= pool->npages/2) {
- i = 0;
- list_for_each(p, &pool->list) {
- if (++i == count)
- break;
- }
- } else {
- i = pool->npages + 1;
- list_for_each_prev(p, &pool->list) {
- if (--i == count)
- break;
- }
- }
- /* Cut 'count' number of pages from the pool */
- list_cut_position(pages, &pool->list, p);
- pool->npages -= count;
- count = 0;
-out:
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- /* clear the pages coming from the pool if requested */
- if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- struct page *page;
-
- list_for_each_entry(page, pages, lru) {
- if (PageHighMem(page))
- clear_highpage(page);
- else
- clear_page(page_address(page));
- }
- }
-
- /* If pool didn't have enough pages allocate new one. */
- if (count) {
- gfp_t gfp_flags = pool->gfp_flags;
-
- /* set zero flag for page allocation if required */
- if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- /* ttm_alloc_new_pages doesn't reference pool so we can run
- * multiple requests in parallel.
- **/
- r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
- count, order);
- }
-
- return r;
-}
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
- unsigned long irq_flags;
- unsigned i;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
- i = 0;
- while (i < npages) {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct page *p = pages[i];
-#endif
- unsigned order = 0, j;
-
- if (!pages[i]) {
- ++i;
- continue;
- }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(flags & TTM_PAGE_FLAG_DMA32) &&
- (npages - i) >= HPAGE_PMD_NR) {
- for (j = 1; j < HPAGE_PMD_NR; ++j)
- if (++p != pages[i + j])
- break;
-
- if (j == HPAGE_PMD_NR)
- order = HPAGE_PMD_ORDER;
- }
-#endif
-
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_pages(pages[i], order);
-
- j = 1 << order;
- while (j) {
- pages[i++] = NULL;
- --j;
- }
- }
- return;
- }
-
- i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (huge) {
- unsigned max_size, n2free;
-
- spin_lock_irqsave(&huge->lock, irq_flags);
- while ((npages - i) >= HPAGE_PMD_NR) {
- struct page *p = pages[i];
- unsigned j;
-
- if (!p)
- break;
-
- for (j = 1; j < HPAGE_PMD_NR; ++j)
- if (++p != pages[i + j])
- break;
-
- if (j != HPAGE_PMD_NR)
- break;
-
- list_add_tail(&pages[i]->lru, &huge->list);
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = NULL;
- huge->npages++;
- }
-
- /* Check that we don't go over the pool limit */
- max_size = _manager->options.max_size;
- max_size /= HPAGE_PMD_NR;
- if (huge->npages > max_size)
- n2free = huge->npages - max_size;
- else
- n2free = 0;
- spin_unlock_irqrestore(&huge->lock, irq_flags);
- if (n2free)
- ttm_page_pool_free(huge, n2free, false);
- }
-#endif
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- while (i < npages) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- list_add_tail(&pages[i]->lru, &pool->list);
- pages[i] = NULL;
- pool->npages++;
- }
- ++i;
- }
- /* Check that we don't go over the pool limit */
- npages = 0;
- if (pool->npages > _manager->options.max_size) {
- npages = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (npages)
- ttm_page_pool_free(pool, npages, false);
-}
-
-/*
- * On success pages list will hold count number of correctly
- * cached pages.
- */
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
- struct list_head plist;
- struct page *p = NULL;
- unsigned count, first;
- int r;
-
- /* No pool for cached pages */
- if (pool == NULL) {
- gfp_t gfp_flags = GFP_USER;
- unsigned i;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- unsigned j;
-#endif
-
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= GFP_DMA32;
- else
- gfp_flags |= GFP_HIGHUSER;
-
- i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(gfp_flags & GFP_DMA32)) {
- while (npages >= HPAGE_PMD_NR) {
- gfp_t huge_flags = gfp_flags;
-
- huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM;
- huge_flags &= ~__GFP_MOVABLE;
- huge_flags &= ~__GFP_COMP;
- p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
- if (!p)
- break;
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = p++;
-
- npages -= HPAGE_PMD_NR;
- }
- }
-#endif
-
- first = i;
- while (npages) {
- p = alloc_page(gfp_flags);
- if (!p) {
- pr_debug("Unable to allocate page\n");
- return -ENOMEM;
- }
-
- /* Swap the pages if we detect consecutive order */
- if (i > first && pages[i - 1] == p - 1)
- swap(p, pages[i - 1]);
-
- pages[i++] = p;
- --npages;
- }
- return 0;
- }
-
- count = 0;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (huge && npages >= HPAGE_PMD_NR) {
- INIT_LIST_HEAD(&plist);
- ttm_page_pool_get_pages(huge, &plist, flags, cstate,
- npages / HPAGE_PMD_NR,
- HPAGE_PMD_ORDER);
-
- list_for_each_entry(p, &plist, lru) {
- unsigned j;
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[count++] = &p[j];
- }
- }
-#endif
-
- INIT_LIST_HEAD(&plist);
- r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
- npages - count, 0);
-
- first = count;
- list_for_each_entry(p, &plist, lru) {
- struct page *tmp = p;
-
- /* Swap the pages if we detect consecutive order */
- if (count > first && pages[count - 1] == tmp - 1)
- swap(tmp, pages[count - 1]);
- pages[count++] = tmp;
- }
-
- if (r) {
- /* If there are any pages in the list, put them back to
- * the pool.
- */
- pr_debug("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
-
- return 0;
-}
-
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
- char *name, unsigned int order)
-{
- spin_lock_init(&pool->lock);
- pool->fill_lock = false;
- INIT_LIST_HEAD(&pool->list);
- pool->npages = pool->nfrees = 0;
- pool->gfp_flags = flags;
- pool->name = name;
- pool->order = order;
-}
-
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- unsigned order = HPAGE_PMD_ORDER;
-#else
- unsigned order = 0;
-#endif
-
- WARN_ON(_manager);
-
- pr_info("Initializing pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- return -ENOMEM;
-
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
-
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
-
- ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma", 0);
-
- ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma", 0);
-
- ttm_page_pool_init_locked(&_manager->wc_pool_huge,
- (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM) &
- ~(__GFP_MOVABLE | __GFP_COMP),
- "wc huge", order);
-
- ttm_page_pool_init_locked(&_manager->uc_pool_huge,
- (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM) &
- ~(__GFP_MOVABLE | __GFP_COMP),
- "uc huge", order);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "pool");
- if (unlikely(ret != 0))
- goto error;
-
- ret = ttm_pool_mm_shrink_init(_manager);
- if (unlikely(ret != 0))
- goto error;
- return 0;
-
-error:
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
-}
-
-void ttm_page_alloc_fini(void)
-{
- int i;
-
- pr_info("Finalizing pool allocator\n");
- ttm_pool_mm_shrink_fini(_manager);
-
- /* OK to use static buffer since global mutex is no longer used. */
- for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
-
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-static void
-ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- unsigned i;
-
- if (mem_count_update == 0)
- goto put_pages;
-
- for (i = 0; i < mem_count_update; ++i) {
- if (!ttm->pages[i])
- continue;
-
- ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
- }
-
-put_pages:
- ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state);
- ttm_tt_set_unpopulated(ttm);
-}
-
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- unsigned i;
- int ret;
-
- if (ttm_tt_is_populated(ttm))
- return 0;
-
- if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
- return -ENOMEM;
-
- ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate_helper(ttm, 0);
- return ret;
- }
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- PAGE_SIZE, ctx);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate_helper(ttm, i);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return ret;
- }
- }
-
- ttm_tt_set_populated(ttm);
- return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
- struct ttm_operation_ctx *ctx)
-{
- unsigned i, j;
- int r;
-
- r = ttm_pool_populate(&tt->ttm, ctx);
- if (r)
- return r;
-
- for (i = 0; i < tt->ttm.num_pages; ++i) {
- struct page *p = tt->ttm.pages[i];
- size_t num_pages = 1;
-
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
- break;
-
- ++num_pages;
- }
-
- tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
- 0, num_pages * PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, tt->dma_address[i])) {
- while (i--) {
- dma_unmap_page(dev, tt->dma_address[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- tt->dma_address[i] = 0;
- }
- ttm_pool_unpopulate(&tt->ttm);
- return -EFAULT;
- }
-
- for (j = 1; j < num_pages; ++j) {
- tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
- ++i;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_populate_and_map_pages);
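
ttm_populate_and_map_pages() above coalesces physically consecutive pages into a single dma_map_page() call covering the whole run. A standalone sketch of the run detection, using a hypothetical pfn array and map_range() helper rather than real DMA calls:

```c
/*
 * Illustrative sketch: physically consecutive entries are grouped so one
 * mapping call can cover the whole run, as the loop above does.
 */
#include <stdio.h>

static void map_range(unsigned long pfn, unsigned int npages)
{
	printf("map pfn %lu..%lu in one call\n", pfn, pfn + npages - 1);
}

int main(void)
{
	unsigned long pfn[] = { 100, 101, 102, 200, 300, 301 };
	unsigned int total = sizeof(pfn) / sizeof(pfn[0]);
	unsigned int i = 0, j;

	while (i < total) {
		unsigned int run = 1;

		for (j = i + 1; j < total && pfn[j] == pfn[j - 1] + 1; ++j)
			++run;

		map_range(pfn[i], run);	/* one dma_map_page() per run */
		i += run;
	}
	return 0;
}
```
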
-
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
- unsigned i, j;
-
- for (i = 0; i < tt->ttm.num_pages;) {
- struct page *p = tt->ttm.pages[i];
- size_t num_pages = 1;
-
- if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
- ++i;
- continue;
- }
-
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
- break;
-
- ++num_pages;
- }
-
- dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- i += num_pages;
- }
- ttm_pool_unpopulate(&tt->ttm);
-}
-EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct ttm_page_pool *p;
- unsigned i;
- char *h[] = {"pool", "refills", "pages freed", "size"};
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, "%7s %12s %13s %8s\n",
- h[0], h[1], h[2], h[3]);
- for (i = 0; i < NUM_POOLS; ++i) {
- p = &_manager->pools[i];
-
- seq_printf(m, "%7s %12ld %13ld %8d\n",
- p->name, p->nrefills,
- p->nfrees, p->npages);
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_page_alloc_debugfs);
+++ /dev/null
-/*
- * Copyright 2011 (c) Oracle Corp.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-/*
- * A simple DMA pool loosely based on dmapool.c. It has certain advantages
- * over the DMA pools:
- * - Pool collects recently freed pages for reuse (and hooks up to
- * the shrinker).
- * - Tracks currently in use pages
- * - Tracks whether the page is UC, WB or cached (and reverts to WB
- * when freed).
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 4
-#define FREE_ALL_PAGES (~0U)
-#define VADDR_FLAG_HUGE_POOL 1UL
-#define VADDR_FLAG_UPDATED_COUNT 2UL
-
-enum pool_type {
- IS_UNDEFINED = 0,
- IS_WC = 1 << 1,
- IS_UC = 1 << 2,
- IS_CACHED = 1 << 3,
- IS_DMA32 = 1 << 4,
- IS_HUGE = 1 << 5
-};
-
-/*
- * The pool structure. There are up to nine pools:
- * - generic (not restricted to DMA32):
- * - write combined, uncached, cached.
- * - dma32 (up to 2^32 - so up 4GB):
- * - write combined, uncached, cached.
- * - huge (not restricted to DMA32):
- * - write combined, uncached, cached.
- * for each 'struct device'. The 'cached' is for pages that are actively used.
- * The other ones can be shrunk by the shrinker API if necessary.
- * @pools: The 'struct device->dma_pools' link.
- * @type: Type of the pool
- * @lock: Protects the free_list from concurrent access. Must be
- * used with irqsave/irqrestore variants because the pool allocator may be
- * called from delayed work.
- * @free_list: Pool of pages that are free to be used. No order requirements.
- * @dev: The device that is associated with these pools.
- * @size: Size used during DMA allocation.
- * @npages_free: Count of available pages for re-use.
- * @npages_in_use: Count of pages that are in use.
- * @nfrees: Stats when pool is shrinking.
- * @nrefills: Stats when the pool is grown.
- * @gfp_flags: Flags to pass for alloc_page.
- * @name: Name of the pool.
- * @dev_name: Name derived from dev - similar to how dev_info works.
- * Used during shutdown as the dev_info during release is unavailable.
- */
-struct dma_pool {
- struct list_head pools; /* The 'struct device->dma_pools link */
- enum pool_type type;
- spinlock_t lock;
- struct list_head free_list;
- struct device *dev;
- unsigned size;
- unsigned npages_free;
- unsigned npages_in_use;
- unsigned long nfrees; /* Stats when shrunk. */
- unsigned long nrefills; /* Stats when grown. */
- gfp_t gfp_flags;
- char name[13]; /* "cached dma32" */
- char dev_name[64]; /* Constructed from dev */
-};
-
-/*
- * The accounting structure keeping track of the allocated page along with
- * the DMA address.
- * @page_list: The link into the pool's free_list (or the TTM object's
- * pages_list while the page is in use).
- * @vaddr: The virtual address of the page, plus a flag if the page belongs
- * to a huge pool.
- * @dma: The bus address of the page. If the page is not allocated
- * via the DMA API, it will be -1.
- */
-struct dma_page {
- struct list_head page_list;
- unsigned long vaddr;
- struct page *p;
- dma_addr_t dma;
-};
-
-/*
- * Limits for the pool. They are handled without locks because only place where
- * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-/*
- * Contains the list of all of the 'struct device' and their corresponding
- * DMA pools. Guarded by _manager->lock.
- * @pools: The link to 'struct ttm_pool_manager->pools'
- * @dev: The 'struct device' associated with the 'pool'
- * @pool: The 'struct dma_pool' associated with the 'dev'
- */
-struct device_pools {
- struct list_head pools;
- struct device *dev;
- struct dma_pool *pool;
-};
-
-/*
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * @lock: Lock used when adding/removing from pools
- * @pools: List of 'struct device' and 'struct dma_pool' tuples.
- * @options: Limits for the pool.
- * @npools: Total amount of pools in existence.
- * @shrinker: The structure used by [un|]register_shrinker
- */
-struct ttm_pool_manager {
- struct mutex lock;
- struct list_head pools;
- struct ttm_pool_opts options;
- unsigned npools;
- struct shrinker mm_shrink;
- struct kobject kobj;
-};
-
-static struct ttm_pool_manager *_manager;
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
-
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max) {
- m->options.max_size = val;
- } else if (attr == &ttm_page_pool_small) {
- m->options.small = val;
- } else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-static int ttm_set_pages_caching(struct dma_pool *pool,
- struct page **pages, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- if (pool->type & IS_UC) {
- r = ttm_set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to uc!\n",
- pool->dev_name, cpages);
- }
- if (pool->type & IS_WC) {
- r = ttm_set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to wc!\n",
- pool->dev_name, cpages);
- }
- return r;
-}
-
-static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
-{
- unsigned long attrs = 0;
- dma_addr_t dma = d_page->dma;
- d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
- if (pool->type & IS_HUGE)
- attrs = DMA_ATTR_NO_WARN;
-
- dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
-
- kfree(d_page);
- d_page = NULL;
-}
-static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
-{
- struct dma_page *d_page;
- unsigned long attrs = 0;
- void *vaddr;
-
- d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
- if (!d_page)
- return NULL;
-
- if (pool->type & IS_HUGE)
- attrs = DMA_ATTR_NO_WARN;
-
- vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
- pool->gfp_flags, attrs);
- if (vaddr) {
- if (is_vmalloc_addr(vaddr))
- d_page->p = vmalloc_to_page(vaddr);
- else
- d_page->p = virt_to_page(vaddr);
- d_page->vaddr = (unsigned long)vaddr;
- if (pool->type & IS_HUGE)
- d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
- } else {
- kfree(d_page);
- d_page = NULL;
- }
- return d_page;
-}
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
-{
- enum pool_type type = IS_UNDEFINED;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- type |= IS_DMA32;
- if (cstate == tt_cached)
- type |= IS_CACHED;
- else if (cstate == tt_uncached)
- type |= IS_UC;
- else
- type |= IS_WC;
-
- return type;
-}
-
-static void ttm_pool_update_free_locked(struct dma_pool *pool,
- unsigned freed_pages)
-{
- pool->npages_free -= freed_pages;
- pool->nfrees += freed_pages;
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- struct page *page = d_page->p;
- unsigned num_pages;
-
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED)) {
- num_pages = pool->size / PAGE_SIZE;
- if (ttm_set_pages_wb(page, num_pages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, num_pages);
- }
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
-static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
- struct page *pages[], unsigned npages)
-{
- struct dma_page *d_page, *tmp;
-
- if (pool->type & IS_HUGE) {
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
- ttm_dma_page_put(pool, d_page);
-
- return;
- }
-
- /* Don't set WB on WB page pool. */
- if (npages && !(pool->type & IS_CACHED) &&
- ttm_set_pages_array_wb(pages, npages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, npages);
-
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- }
-}
-
-/*
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: to free the pages from
- * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
- * @use_static: Safe to use static buffer
- **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
- bool use_static)
-{
- static struct page *static_buf[NUM_PAGES_TO_ALLOC];
- unsigned long irq_flags;
- struct dma_page *dma_p, *tmp;
- struct page **pages_to_free;
- struct list_head d_pages;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- if (use_static)
- pages_to_free = static_buf;
- else
- pages_to_free = kmalloc_array(npages_to_free,
- sizeof(struct page *),
- GFP_KERNEL);
-
- if (!pages_to_free) {
- pr_debug("%s: Failed to allocate memory for pool free operation\n",
- pool->dev_name);
- return 0;
- }
- INIT_LIST_HEAD(&d_pages);
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- /* We pick the oldest ones off the list */
- list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
- page_list) {
- if (freed_pages >= npages_to_free)
- break;
-
- /* Move the dma_page from one list to another. */
- list_move(&dma_p->page_list, &d_pages);
-
- pages_to_free[freed_pages++] = dma_p->p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_dma_pages_put(pool, &d_pages, pages_to_free,
- freed_pages);
-
- INIT_LIST_HEAD(&d_pages);
-
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
- /* We cannot fall through or break here because the code
- * after the loop expects the pool lock to still be held,
- * and we have already dropped it.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
-out:
- if (pages_to_free != static_buf)
- kfree(pages_to_free);
- return nr_free;
-}
-
-static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
-{
- struct device_pools *p;
- struct dma_pool *pool;
-
- if (!dev)
- return;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry_reverse(p, &_manager->pools, pools) {
- if (p->dev != dev)
- continue;
- pool = p->pool;
- if (pool->type != type)
- continue;
-
- list_del(&p->pools);
- kfree(p);
- _manager->npools--;
- break;
- }
- list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- /* Takes a spinlock.. */
- /* OK to use static buffer since global mutex is held. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
- WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
- /* This code path is called after _all_ references to the
- * struct device have been dropped - so nobody should be
- * touching it. In case somebody is trying to _add_ we are
- * guarded by the mutex. */
- list_del(&pool->pools);
- kfree(pool);
- break;
- }
- mutex_unlock(&_manager->lock);
-}
-
-/*
- * On freeing of the 'struct device' this destructor is run.
- * Albeit the pool might have already been freed earlier.
- */
-static void ttm_dma_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- if (pool)
- ttm_dma_free_pool(dev, pool->type);
-}
-
-static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
- enum pool_type type)
-{
- const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
- struct device_pools *sec_pool = NULL;
- struct dma_pool *pool = NULL, **ptr;
- unsigned i;
- int ret = -ENODEV;
- char *p;
-
- if (!dev)
- return NULL;
-
- ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- ret = -ENOMEM;
-
- pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
- dev_to_node(dev));
- if (!pool)
- goto err_mem;
-
- sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
- dev_to_node(dev));
- if (!sec_pool)
- goto err_mem;
-
- INIT_LIST_HEAD(&sec_pool->pools);
- sec_pool->dev = dev;
- sec_pool->pool = pool;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->pools);
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->npages_free = pool->npages_in_use = 0;
- pool->nfrees = 0;
- pool->gfp_flags = flags;
- if (type & IS_HUGE)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pool->size = HPAGE_PMD_SIZE;
-#else
- BUG();
-#endif
- else
- pool->size = PAGE_SIZE;
- pool->type = type;
- pool->nrefills = 0;
- p = pool->name;
- for (i = 0; i < ARRAY_SIZE(t); i++) {
- if (type & t[i]) {
- p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
- "%s", n[i]);
- }
- }
- *p = 0;
- /* We copy the name for pr_ calls because by the time dma_pool_destroy is
- * called the kobj->name has already been deallocated. */
- snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
- dev_driver_string(dev), dev_name(dev));
- mutex_lock(&_manager->lock);
- /* You can get the dma_pool from either the global: */
- list_add(&sec_pool->pools, &_manager->pools);
- _manager->npools++;
- /* or from 'struct device': */
- list_add(&pool->pools, &dev->dma_pools);
- mutex_unlock(&_manager->lock);
-
- *ptr = pool;
- devres_add(dev, ptr);
-
- return pool;
-err_mem:
- devres_free(ptr);
- kfree(sec_pool);
- kfree(pool);
- return ERR_PTR(ret);
-}
-
-static struct dma_pool *ttm_dma_find_pool(struct device *dev,
- enum pool_type type)
-{
- struct dma_pool *pool, *tmp;
-
- if (type == IS_UNDEFINED)
- return NULL;
-
- /* NB: We iterate on the 'struct dev' which has no spinlock, but
- * it does have a kref which we have taken. The kref is taken during
- * graphic driver loading - in the drm_pci_init it calls either
- * pci_dev_get or pci_register_driver which both end up taking a kref
- * on 'struct device'.
- *
- * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
- * and call the dev_res destructors: ttm_dma_pool_release. The nice
- * thing is that at that point in time there are no pages associated with
- * the driver, so this function will not be called.
- */
- list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
- if (pool->type == type)
- return pool;
- return NULL;
-}
-
-/*
- * Free the pages that failed to change the caching state. If there
- * are pages that have already changed their caching state, put them back
- * into the pool.
- */
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
- struct list_head *d_pages,
- struct page **failed_pages,
- unsigned cpages)
-{
- struct dma_page *d_page, *tmp;
- struct page *p;
- unsigned i = 0;
-
- p = failed_pages[0];
- if (!p)
- return;
- /* Find the failed page. */
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- if (d_page->p != p)
- continue;
- /* .. and then progress over the full list. */
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- if (++i < cpages)
- p = failed_pages[i];
- else
- break;
- }
-
-}
-
-/*
- * Allocate 'count' pages, set their caching state as required by the pool,
- * and add them to the 'd_pages' list.
- * We return zero for success, and negative numbers as errors.
- */
-static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
- struct list_head *d_pages,
- unsigned count)
-{
- struct page **caching_array;
- struct dma_page *dma_p;
- struct page *p;
- int r = 0;
- unsigned i, j, npages, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
- /* allocate array for page caching change */
- caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
- GFP_KERNEL);
-
- if (!caching_array) {
- pr_debug("%s: Unable to allocate table for new pages\n",
- pool->dev_name);
- return -ENOMEM;
- }
-
- if (count > 1)
- pr_debug("%s: (%s:%d) Getting %d pages\n",
- pool->dev_name, pool->name, current->pid, count);
-
- for (i = 0, cpages = 0; i < count; ++i) {
- dma_p = __ttm_dma_alloc_page(pool);
- if (!dma_p) {
- pr_debug("%s: Unable to get page %u\n",
- pool->dev_name, i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- }
- r = -ENOMEM;
- goto out;
- }
- p = dma_p->p;
- list_add(&dma_p->page_list, d_pages);
-
-#ifdef CONFIG_HIGHMEM
- /* gfp flags of highmem pages should never be dma32, so we
- * should be fine in such a case
- */
- if (PageHighMem(p))
- continue;
-#endif
-
- npages = pool->size / PAGE_SIZE;
- for (j = 0; j < npages; ++j) {
- caching_array[cpages++] = p + j;
- if (cpages == max_cpages) {
- /* Note: Cannot hold the spinlock */
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r) {
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array, cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(pool, d_pages,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
- return r;
-}
-
-/*
- * @return the number of pages available in the pool after a possible refill;
- * zero means the pool is empty and could not be filled.
- */
-static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
- unsigned long *irq_flags)
-{
- unsigned count = _manager->options.small;
- int r = pool->npages_free;
-
- if (count > pool->npages_free) {
- struct list_head d_pages;
-
- INIT_LIST_HEAD(&d_pages);
-
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
- /* ttm_dma_pool_alloc_new_pages() returns zero on success,
- * or a negative error code. */
- r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
-
- spin_lock_irqsave(&pool->lock, *irq_flags);
- if (!r) {
- /* Add the fresh to the end.. */
- list_splice(&d_pages, &pool->free_list);
- ++pool->nrefills;
- pool->npages_free += count;
- r = count;
- } else {
- struct dma_page *d_page;
- unsigned cpages = 0;
-
- pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
-
- list_for_each_entry(d_page, &d_pages, page_list) {
- cpages++;
- }
- list_splice_tail(&d_pages, &pool->free_list);
- pool->npages_free += cpages;
- r = cpages;
- }
- }
- return r;
-}
-
-/*
- * The populate list is actually a stack (not that it matters, as TTM
- * allocates one page at a time).
- * Returns the dma_page pointer on success, otherwise NULL.
- */
-static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_dma_tt *ttm_dma,
- unsigned index)
-{
- struct dma_page *d_page = NULL;
- struct ttm_tt *ttm = &ttm_dma->ttm;
- unsigned long irq_flags;
- int count;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
- if (count) {
- d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
- ttm->pages[index] = d_page->p;
- ttm_dma->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
- pool->npages_in_use += 1;
- pool->npages_free -= 1;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- return d_page;
-}
-
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- gfp_t gfp_flags;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (huge) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
-
- if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- return gfp_flags;
-}
-
-/*
- * On success pages list will hold count number of correctly
- * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
- */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
- unsigned long num_pages = ttm->num_pages;
- struct dma_pool *pool;
- struct dma_page *d_page;
- enum pool_type type;
- unsigned i;
- int ret;
-
- if (ttm_tt_is_populated(ttm))
- return 0;
-
- if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
- return -ENOMEM;
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- i = 0;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- goto skip_huge;
-
- pool = ttm_dma_find_pool(dev, type | IS_HUGE);
- if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
-
- pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
- if (IS_ERR_OR_NULL(pool))
- goto skip_huge;
- }
-
- while (num_pages >= HPAGE_PMD_NR) {
- unsigned j;
-
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (!d_page)
- break;
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, ctx);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
-
- d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
- for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
- ttm->pages[j] = ttm->pages[j - 1] + 1;
- ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
- PAGE_SIZE;
- }
-
- i += HPAGE_PMD_NR;
- num_pages -= HPAGE_PMD_NR;
- }
-
-skip_huge:
-#endif
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
-
- pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool))
- return -ENOMEM;
- }
-
- while (num_pages) {
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (!d_page) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, ctx);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
-
- d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
- ++i;
- --num_pages;
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return ret;
- }
- }
-
- ttm_tt_set_populated(ttm);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_populate);
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
- struct dma_pool *pool;
- struct dma_page *d_page, *next;
- enum pool_type type;
- bool is_cached = false;
- unsigned count, i, npages = 0;
- unsigned long irq_flags;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pool = ttm_dma_find_pool(dev, type | IS_HUGE);
- if (pool) {
- count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
- page_list) {
- if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
- continue;
-
- count++;
- if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
- ttm_mem_global_free_page(mem_glob, d_page->p,
- pool->size);
- d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
- }
- ttm_dma_page_put(pool, d_page);
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- pool->nfrees += count;
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- }
-#endif
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool)
- return;
-
- is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, tt_cached)) == pool);
-
- /* make sure pages array match list and count number of pages */
- count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
- page_list) {
- ttm->pages[count] = d_page->p;
- count++;
-
- if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
- ttm_mem_global_free_page(mem_glob, d_page->p,
- pool->size);
- d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
- }
-
- if (is_cached)
- ttm_dma_page_put(pool, d_page);
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- if (is_cached) {
- pool->nfrees += count;
- } else {
- pool->npages_free += count;
- list_splice(&ttm_dma->pages_list, &pool->free_list);
- /*
- * Wait to have at least NUM_PAGES_TO_ALLOC pages
- * to free in order to minimize calls to set_memory_wb().
- */
- if (pool->npages_free >= (_manager->options.max_size +
- NUM_PAGES_TO_ALLOC))
- npages = pool->npages_free - _manager->options.max_size;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; i++) {
- ttm->pages[i] = NULL;
- ttm_dma->dma_address[i] = 0;
- }
-
- /* shrink pool if necessary (only on !is_cached pools)*/
- if (npages)
- ttm_dma_page_pool_free(pool, npages, false);
- ttm_tt_set_unpopulated(ttm);
-}
-EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
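
The release-side hysteresis in ttm_dma_unpopulate() only kicks in once npages_free exceeds max_size by at least NUM_PAGES_TO_ALLOC, and then frees everything above max_size. A quick worked example; NUM_PAGES_TO_ALLOC comes out at 512 for the PAGE_SIZE/sizeof(struct page *) definition above, assuming 4 KiB pages and 8-byte pointers, and the other numbers are made up:

```c
/* Worked example of the free hysteresis; the figures are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int max_size = 16384;      /* pool limit in pages */
	unsigned int batch = 512;           /* NUM_PAGES_TO_ALLOC on a 4K/64-bit build */
	unsigned int npages_free = 16900;   /* current free count */
	unsigned int to_free = 0;

	if (npages_free >= max_size + batch)
		to_free = npages_free - max_size;

	/* prints 516: at least one full batch is released per shrink */
	printf("pages to free: %u\n", to_free);
	return 0;
}
```
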
-
-/**
- * Callback for mm to request the pool to reduce the number of pages held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
- * shrinkers.
- */
-static unsigned long
-ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- static unsigned start_pool;
- unsigned idx = 0;
- unsigned pool_offset;
- unsigned shrink_pages = sc->nr_to_scan;
- struct device_pools *p;
- unsigned long freed = 0;
-
- if (list_empty(&_manager->pools))
- return SHRINK_STOP;
-
- if (!mutex_trylock(&_manager->lock))
- return SHRINK_STOP;
- if (!_manager->npools)
- goto out;
- pool_offset = ++start_pool % _manager->npools;
- list_for_each_entry(p, &_manager->pools, pools) {
- unsigned nr_free;
-
- if (!p->dev)
- continue;
- if (shrink_pages == 0)
- break;
- /* Do it in round-robin fashion. */
- if (++idx < pool_offset)
- continue;
- nr_free = shrink_pages;
- /* OK to use static buffer since global mutex is held. */
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
- freed += nr_free - shrink_pages;
-
- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
- p->pool->dev_name, p->pool->name, current->pid,
- nr_free, shrink_pages);
- }
-out:
- mutex_unlock(&_manager->lock);
- return freed;
-}
-
-static unsigned long
-ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct device_pools *p;
- unsigned long count = 0;
-
- if (!mutex_trylock(&_manager->lock))
- return 0;
- list_for_each_entry(p, &_manager->pools, pools)
- count += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return count;
-}
-
-static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
- manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
- manager->mm_shrink.seeks = 1;
- return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret;
-
- WARN_ON(_manager);
-
- pr_info("Initializing DMA pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- return -ENOMEM;
-
- mutex_init(&_manager->lock);
- INIT_LIST_HEAD(&_manager->pools);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- /* This takes care of auto-freeing the _manager */
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "dma_pool");
- if (unlikely(ret != 0))
- goto error;
-
- ret = ttm_dma_pool_mm_shrink_init(_manager);
- if (unlikely(ret != 0))
- goto error;
- return 0;
-
-error:
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
-}
-
-void ttm_dma_page_alloc_fini(void)
-{
- struct device_pools *p, *t;
-
- pr_info("Finalizing DMA pool allocator\n");
- ttm_dma_pool_mm_shrink_fini(_manager);
-
- list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
- dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
- current->pid);
- WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
- ttm_dma_pool_match, p->pool));
- ttm_dma_free_pool(p->dev, p->pool->type);
- }
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct device_pools *p;
- struct dma_pool *pool = NULL;
-
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, " pool refills pages freed inuse available name\n");
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools) {
- struct device *dev = p->dev;
- if (!dev)
- continue;
- pool = p->pool;
- seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
- pool->name, pool->nrefills,
- pool->nfrees, pool->npages_in_use,
- pool->npages_free,
- pool->dev_name);
- }
- mutex_unlock(&_manager->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
+ * invalidate for those addresses.
+ *
+ * In addition, allocations from the DMA coherent API are pooled as well,
+ * because they are rather slow compared to alloc_pages+map.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the allocation order
+ * stored in the lower bits
+ */
+struct ttm_pool_dma {
+ dma_addr_t addr;
+ unsigned long vaddr;
+};
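
Because dma_alloc_attrs() returns at least page-aligned memory, the low bits of vaddr are always zero and can carry the allocation order, which ttm_pool_free_page() later strips off again with PAGE_MASK. A user-space sketch of that packing, with purely illustrative constants:

```c
/*
 * Sketch of packing an allocation order into the low bits of a
 * page-aligned address, as ttm_pool_dma::vaddr does. Not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	uintptr_t vaddr = 0x7f2a00000000UL;	/* page aligned */
	unsigned int order = 9;			/* e.g. a 2 MiB chunk */
	uintptr_t packed = vaddr | order;

	printf("vaddr %#lx\n", (unsigned long)(packed & EX_PAGE_MASK));
	printf("order %lu\n", (unsigned long)(packed & ~EX_PAGE_MASK));
	return 0;
}
```
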
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker mm_shrinker;
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+ unsigned int order)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ struct page *p;
+ void *vaddr;
+
+ if (order) {
+ gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
+ gfp_flags &= ~__GFP_MOVABLE;
+ gfp_flags &= ~__GFP_COMP;
+ }
+
+ if (!pool->use_dma_alloc) {
+ p = alloc_pages(gfp_flags, order);
+ if (p)
+ p->private = order;
+ return p;
+ }
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+ &dma->addr, gfp_flags, attr);
+ if (!vaddr)
+ goto error_free;
+
+ /* TODO: This is an illegal abuse of the DMA API, but we need to rework
+ * TTM page fault handling and extend the DMA API to clean this up.
+ */
+ if (is_vmalloc_addr(vaddr))
+ p = vmalloc_to_page(vaddr);
+ else
+ p = virt_to_page(vaddr);
+
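+ /* DMA allocations are at least page aligned, so the low bits of the
+  * kernel address are free to stash the allocation order.
+  */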
+ dma->vaddr = (unsigned long)vaddr | order;
+ p->private = (unsigned long)dma;
+ return p;
+
+error_free:
+ kfree(dma);
+ return NULL;
+}
+
+/* Reset the caching of pages of size 1 << order and free them */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+ unsigned int order, struct page *p)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ void *vaddr;
+
+#ifdef CONFIG_X86
+ /* We don't care that set_pages_wb is inefficient here. This is only
+ * used when we have to shrink and CPU overhead is irrelevant then.
+ */
+ if (caching != ttm_cached && !PageHighMem(p))
+ set_pages_wb(p, 1 << order);
+#endif
+
+ if (!pool->use_dma_alloc) {
+ __free_pages(p, order);
+ return;
+ }
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ dma = (void *)p->private;
+ vaddr = (void *)(dma->vaddr & PAGE_MASK);
+ dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+ attr);
+ kfree(dma);
+}
+
+/* Apply a new caching to an array of pages */
+static int ttm_pool_apply_caching(struct page **first, struct page **last,
+ enum ttm_caching caching)
+{
+#ifdef CONFIG_X86
+ unsigned int num_pages = last - first;
+
+ if (!num_pages)
+ return 0;
+
+ switch (caching) {
+ case ttm_cached:
+ break;
+ case ttm_write_combined:
+ return set_pages_array_wc(first, num_pages);
+ case ttm_uncached:
+ return set_pages_array_uc(first, num_pages);
+ }
+#endif
+ return 0;
+}
+
+/* Map pages of 1 << order size and fill the DMA address array */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr)
+{
+ dma_addr_t addr;
+ unsigned int i;
+
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ addr = dma->addr;
+ } else {
+ size_t size = (1ULL << order) * PAGE_SIZE;
+
+ addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+  if (dma_mapping_error(pool->dev, addr))
+ return -EFAULT;
+ }
+
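+ /* Write one address per PAGE_SIZE chunk and advance the caller's cursor */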
+ for (i = 1 << order; i ; --i) {
+ *(*dma_addr)++ = addr;
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+ unsigned int num_pages)
+{
+ /* Unmapped while freeing the page */
+ if (pool->use_dma_alloc)
+ return;
+
+ dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+ DMA_BIDIRECTIONAL);
+}
+
+/* Give pages into a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+ spin_lock(&pt->lock);
+ list_add(&p->lru, &pt->pages);
+ spin_unlock(&pt->lock);
+ atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+ if (p) {
+ atomic_long_sub(1 << pt->order, &allocated_pages);
+ list_del(&p->lru);
+ }
+ spin_unlock(&pt->lock);
+
+ return p;
+}
+
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ enum ttm_caching caching, unsigned int order)
+{
+ pt->pool = pool;
+ pt->caching = caching;
+ pt->order = order;
+ spin_lock_init(&pt->lock);
+ INIT_LIST_HEAD(&pt->pages);
+
+ spin_lock(&shrinker_lock);
+ list_add_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+ struct page *p, *tmp;
+
+ spin_lock(&shrinker_lock);
+ list_del(&pt->shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+ enum ttm_caching caching,
+ unsigned int order)
+{
+ if (pool->use_dma_alloc)
+ return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+ switch (caching) {
+ case ttm_write_combined:
+ return &global_write_combined[order];
+ case ttm_uncached:
+ return &global_uncached[order];
+ default:
+ break;
+ }
+#endif
+
+ return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+ struct ttm_pool_type *pt;
+ unsigned int num_freed;
+ struct page *p;
+
+ spin_lock(&shrinker_lock);
+ pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+
+ p = ttm_pool_type_take(pt);
+ if (p) {
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ num_freed = 1 << pt->order;
+ } else {
+ num_freed = 0;
+ }
+
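+ /* Rotate the pool to the tail so shrinking round-robins over all pools */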
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ return num_freed;
+}
+
+/* Return the allocation order of a page */
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
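+  /* The order is stashed in the low bits of the vaddr */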
+ return dma->vaddr & ~PAGE_MASK;
+ }
+
+ return p->private;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ unsigned long num_pages = tt->num_pages;
+ dma_addr_t *dma_addr = tt->dma_address;
+ struct page **caching = tt->pages;
+ struct page **pages = tt->pages;
+ gfp_t gfp_flags = GFP_USER;
+ unsigned int i, order;
+ struct page *p;
+ int r;
+
+ WARN_ON(!num_pages || ttm_tt_is_populated(tt));
+ WARN_ON(dma_addr && !pool->dev);
+
+ if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (tt->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+ if (pool->use_dma32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
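+ /* Allocate the largest possible order first and step down whenever
+  * fewer pages remain or an allocation of that order fails.
+  */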
+ for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ order = min_t(unsigned int, order, __fls(num_pages))) {
+ bool apply_caching = false;
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ p = pt ? ttm_pool_type_take(pt) : NULL;
+ if (p) {
+ apply_caching = true;
+ } else {
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
+ if (p && PageHighMem(p))
+ apply_caching = true;
+ }
+
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
+
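+  /* Pages taken from the pool already have the right caching and
+   * highmem pages cannot be converted via the linear map, so flush
+   * the pending conversion for the batch gathered so far and skip
+   * this page.
+   */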
+ if (apply_caching) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ caching = pages + (1 << order);
+ }
+
+ r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
+ (1 << order) * PAGE_SIZE,
+ ctx);
+ if (r)
+ goto error_free_page;
+
+ if (dma_addr) {
+ r = ttm_pool_map(pool, order, p, &dma_addr);
+ if (r)
+ goto error_global_free;
+ }
+
+ num_pages -= 1 << order;
+ for (i = 1 << order; i; --i)
+ *(pages++) = p++;
+ }
+
+ r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ if (r)
+ goto error_free_all;
+
+ return 0;
+
+error_global_free:
+ ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
+
+error_free_page:
+ ttm_pool_free_page(pool, tt->caching, order, p);
+
+error_free_all:
+ num_pages = tt->num_pages - num_pages;
+ for (i = 0; i < num_pages; ) {
+ order = ttm_pool_page_order(pool, tt->pages[i]);
+ ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+ i += 1 << order;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+ unsigned int i;
+
+ for (i = 0; i < tt->num_pages; ) {
+ struct page *p = tt->pages[i];
+ unsigned int order, num_pages;
+ struct ttm_pool_type *pt;
+
+ order = ttm_pool_page_order(pool, p);
+ num_pages = 1ULL << order;
+ ttm_mem_global_free_page(&ttm_mem_glob, p,
+ num_pages * PAGE_SIZE);
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ if (pt)
+ ttm_pool_type_give(pt, tt->pages[i]);
+ else
+ ttm_pool_free_page(pool, tt->caching, order,
+ tt->pages[i]);
+
+ i += num_pages;
+ }
+
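+ /* Shrink the pools back below the global page_pool_size limit */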
+ while (atomic_long_read(&allocated_pages) > page_pool_size)
+ ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @use_dma_alloc: true if coherent DMA alloc should be used
+ * @use_dma32: true if GFP_DMA32 should be used
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32)
+{
+ unsigned int i, j;
+
+ WARN_ON(!dev && use_dma_alloc);
+
+ pool->dev = dev;
+ pool->use_dma_alloc = use_dma_alloc;
+ pool->use_dma32 = use_dma32;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_init(&pool->caching[i].orders[j],
+ pool, i, j);
+}
+EXPORT_SYMBOL(ttm_pool_init);
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
+ */
+void ttm_pool_fini(struct ttm_pool *pool)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_fini(&pool->caching[i].orders[j]);
+}
+EXPORT_SYMBOL(ttm_pool_fini);
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Dump information about the different pool types */
+static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+ struct seq_file *m)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+ seq_puts(m, "\n");
+}
+
+/**
+ * ttm_pool_debugfs - Debugfs dump function for a pool
+ *
+ * @pool: the pool to dump the information for
+ * @m: seq_file to dump to
+ *
+ * Make a debugfs dump with the per pool and global information.
+ */
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
+{
+ unsigned int i;
+
+ spin_lock(&shrinker_lock);
+
+ seq_puts(m, "\t ");
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+
+ seq_puts(m, "wc\t:");
+ ttm_pool_debugfs_orders(global_write_combined, m);
+ seq_puts(m, "uc\t:");
+ ttm_pool_debugfs_orders(global_uncached, m);
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ seq_puts(m, "DMA ");
+ switch (i) {
+ case ttm_cached:
+ seq_puts(m, "\t:");
+ break;
+ case ttm_write_combined:
+ seq_puts(m, "wc\t:");
+ break;
+ case ttm_uncached:
+ seq_puts(m, "uc\t:");
+ break;
+ }
+ ttm_pool_debugfs_orders(pool->caching[i].orders, m);
+ }
+
+ seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+ atomic_long_read(&allocated_pages), page_pool_size);
+
+ spin_unlock(&shrinker_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_debugfs);
+
+#endif
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_freed = 0;
+
+ do
+ num_freed += ttm_pool_shrink();
+ while (!num_freed && atomic_long_read(&allocated_pages));
+
+ return num_freed;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/**
+ * ttm_pool_mgr_init - Initialize globals
+ *
+ * @num_pages: default number of pages
+ *
+ * Initialize the global locks and lists for the MM shrinker.
+ */
+int ttm_pool_mgr_init(unsigned long num_pages)
+{
+ unsigned int i;
+
+ if (!page_pool_size)
+ page_pool_size = num_pages;
+
+ spin_lock_init(&shrinker_lock);
+ INIT_LIST_HEAD(&shrinker_list);
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_init(&global_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+ }
+
+ mm_shrinker.count_objects = ttm_pool_shrinker_count;
+ mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker.seeks = 1;
+ return register_shrinker(&mm_shrinker);
+}
+
+/**
+ * ttm_pool_mgr_fini - Finalize globals
+ *
+ * Cleanup the global pools and unregister the MM shrinker.
+ */
+void ttm_pool_mgr_fini(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_fini(&global_write_combined[i]);
+ ttm_pool_type_fini(&global_uncached[i]);
+ }
+
+ unregister_shrinker(&mm_shrinker);
+ WARN_ON(!list_empty(&shrinker_list));
+}
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(bdev, man);
+ ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
return ret;
EXPORT_SYMBOL(ttm_resource_manager_init);
/*
- * ttm_resource_manager_force_list_clean
+ * ttm_resource_manager_evict_all
*
* @bdev - device to use
* @man - manager to use
*
- * Force all the objects out of a memory manager until clean.
+ * Evict all objects from a memory manager until it is empty.
* Part of memory manager cleanup sequence.
*/
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
- struct ttm_resource_manager *man)
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
return 0;
}
-EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
+EXPORT_SYMBOL(ttm_resource_manager_evict_all);
/**
* ttm_resource_manager_debug
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
if (bo->ttm)
return 0;
- if (bdev->need_dma32)
- page_flags |= TTM_PAGE_FLAG_DMA32;
-
if (bdev->no_retry)
page_flags |= TTM_PAGE_FLAG_NO_RETRY;
return 0;
}
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
- sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
- if (!ttm->ttm.pages)
+ ttm->pages = kvmalloc_array(ttm->num_pages,
+ sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->pages)
return -ENOMEM;
- ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+ ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
return 0;
}
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+ ttm->dma_address = kvmalloc_array(ttm->num_pages,
sizeof(*ttm->dma_address),
GFP_KERNEL | __GFP_ZERO);
if (!ttm->dma_address)
return 0;
}
-static int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- int ret = 0;
-
- if (PageHighMem(p))
- return 0;
-
- if (c_old != tt_cached) {
- /* p isn't in the default caching state, set it to
- * writeback first to free its current memtype. */
-
- ret = ttm_set_pages_wb(p, 1);
- if (ret)
- return ret;
- }
-
- if (c_new == tt_wc)
- ret = ttm_set_pages_wc(p, 1);
- else if (c_new == tt_uncached)
- ret = ttm_set_pages_uc(p, 1);
-
- return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
- enum ttm_caching_state c_state)
-{
- int i, j;
- struct page *cur_page;
- int ret;
-
- if (ttm->caching_state == c_state)
- return 0;
-
- if (!ttm_tt_is_populated(ttm)) {
- /* Change caching but don't populate */
- ttm->caching_state = c_state;
- return 0;
- }
-
- if (ttm->caching_state == tt_cached)
- drm_clflush_pages(ttm->pages, ttm->num_pages);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages[i];
- if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page,
- ttm->caching_state,
- c_state);
- if (unlikely(ret != 0))
- goto out_err;
- }
- }
-
- ttm->caching_state = c_state;
-
- return 0;
-
-out_err:
- for (j = 0; j < i; ++j) {
- cur_page = ttm->pages[j];
- if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page, c_state,
- ttm->caching_state);
- }
- }
-
- return ret;
-}
-
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
-{
- enum ttm_caching_state state;
-
- if (placement & TTM_PL_FLAG_WC)
- state = tt_wc;
- else if (placement & TTM_PL_FLAG_UNCACHED)
- state = tt_uncached;
- else
- state = tt_cached;
-
- return ttm_tt_set_caching(ttm, state);
-}
-EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_unpopulate(bdev, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
- ttm->swap_storage)
+ if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ uint32_t page_flags,
+ enum ttm_caching caching)
{
ttm->num_pages = bo->num_pages;
- ttm->caching_state = tt_cached;
+ ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
- ttm_tt_set_unpopulated(ttm);
+ ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
+ ttm->caching = caching;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ uint32_t page_flags, enum ttm_caching caching)
{
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
if (ttm_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
void ttm_tt_fini(struct ttm_tt *ttm)
{
- kvfree(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm->dma_address);
ttm->pages = NULL;
+ ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+ if (ttm_dma_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
}
EXPORT_SYMBOL(ttm_dma_tt_init);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
int ret;
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
if (page_flags & TTM_PAGE_FLAG_SG)
- ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_sg_tt_alloc_page_directory(ttm);
else
- ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_dma_tt_alloc_page_directory(ttm);
if (ret) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- if (ttm->pages)
- kvfree(ttm->pages);
- else
- kvfree(ttm_dma->dma_address);
- ttm->pages = NULL;
- ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
+ gfp_t gfp_mask;
+ int i, ret;
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
- from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
-
+ from_page = shmem_read_mapping_page_gfp(swap_space, i,
+ gfp_mask);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
}
to_page = ttm->pages[i];
- if (unlikely(to_page == NULL))
+ if (unlikely(to_page == NULL)) {
+ ret = -ENOMEM;
goto out_err;
+ }
copy_highpage(to_page, from_page);
put_page(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
- fput(swap_storage);
+ fput(swap_storage);
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
return 0;
+
out_err:
return ret;
}
-int ttm_tt_swapout(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
-
- BUG_ON(ttm->caching_state != tt_cached);
-
- if (!persistent_swap_storage) {
- swap_storage = shmem_file_setup("ttm swap",
- ttm->num_pages << PAGE_SHIFT,
- 0);
- if (IS_ERR(swap_storage)) {
- pr_err("Failed allocating swap storage\n");
- return PTR_ERR(swap_storage);
- }
- } else {
- swap_storage = persistent_swap_storage;
+ gfp_t gfp_mask;
+ int i, ret;
+
+ swap_storage = shmem_file_setup("ttm swap",
+ ttm->num_pages << PAGE_SHIFT,
+ 0);
+ if (IS_ERR(swap_storage)) {
+ pr_err("Failed allocating swap storage\n");
+ return PTR_ERR(swap_storage);
}
swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
-
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- if (persistent_swap_storage)
- ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
return 0;
+
out_err:
- if (!persistent_swap_storage)
- fput(swap_storage);
+ fput(swap_storage);
return ret;
}
if (bdev->driver->ttm_tt_populate)
ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
else
- ret = ttm_pool_populate(ttm, ctx);
- if (!ret)
- ttm_tt_add_mapping(bdev, ttm);
- return ret;
+ ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (ret)
+ return ret;
+
+ ttm_tt_add_mapping(bdev, ttm);
+ ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_tt_unpopulate(bdev, ttm);
+ return ret;
+ }
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);
if (bdev->driver->ttm_tt_unpopulate)
bdev->driver->ttm_tt_unpopulate(bdev, ttm);
else
- ttm_pool_unpopulate(ttm);
+ ttm_pool_free(&bdev->pool, ttm);
+ ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
}
static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
+static vm_fault_t vc4_fault(struct vm_fault *vmf);
+
static const char * const bo_type_names[] = {
"kernel",
"V3D",
return bo;
}
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = vc4_prime_vmap,
+ .vunmap = drm_gem_cma_prime_vunmap,
+ .vm_ops = &vc4_vm_ops,
+};
+
/**
* vc4_gem_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
+ bo->base.base.funcs = &vc4_gem_object_funcs;
+
return &bo->base.base;
}
return dmabuf;
}
-vm_fault_t vc4_fault(struct vm_fault *vmf)
+static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
struct drm_device *dev = crtc->dev;
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
kfree(vc4file);
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object_unlocked = vc4_free_object,
- .gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = vc4_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_vmap = vc4_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = vc4_prime_mmap,
.dumb_create = vc4_dumb_create,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
return ret;
}
+static const struct component_ops vc4_dsi_ops;
static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
return 0;
}
+ ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
return 0;
}
{
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
dev_set_drvdata(dev, dsi);
dsi->pdev = pdev;
-
- /* Note, the initialization sequence for DSI and panels is
- * tricky. The component bind above won't get past its
- * -EPROBE_DEFER until the panel/bridge probes. The
- * panel/bridge will return -EPROBE_DEFER until it has a
- * mipi_dsi_host to register its device to. So, we register
- * the host during pdev probe time, so vc4 as a whole can then
- * -EPROBE_DEFER its component bind process until the panel
- * successfully attaches.
- */
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
mipi_dsi_host_register(&dsi->dsi_host);
- ret = component_add(&pdev->dev, &vc4_dsi_ops);
- if (ret) {
- mipi_dsi_host_unregister(&dsi->dsi_host);
- return ret;
- }
-
return 0;
}
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
}
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
drm_crtc_vblank_on(crtc);
vc4_hvs_atomic_enable(crtc, old_state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+static const struct drm_gem_object_funcs vgem_gem_object_funcs;
+
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
if (!obj)
return ERR_PTR(-ENOMEM);
+ obj->base.funcs = &vgem_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
if (ret) {
kfree(obj);
return 0;
}
+static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
+ .free = vgem_gem_free_object,
+ .pin = vgem_prime_pin,
+ .unpin = vgem_prime_unpin,
+ .get_sg_table = vgem_prime_get_sg_table,
+ .vmap = vgem_prime_vmap,
+ .vunmap = vgem_prime_vunmap,
+ .vm_ops = &vgem_gem_vm_ops,
+};
+
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
.postclose = vgem_postclose,
- .gem_free_object_unlocked = vgem_gem_free_object,
- .gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = vgem_prime_pin,
- .gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
- .gem_prime_get_sg_table = vgem_prime_get_sg_table,
- .gem_prime_vmap = vgem_prime_vmap,
- .gem_prime_vunmap = vgem_prime_vunmap,
.gem_prime_mmap = vgem_prime_mmap,
.name = DRIVER_NAME,
mutex_lock(&dev->struct_mutex);
if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
+ mutex_unlock(&dev->struct_mutex);
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
- mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
static int virtio_gpu_features(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
+ virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ if (vgdev->host_visible_region.len) {
+ seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ }
return 0;
}
return 0;
}
+static int
+virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+ struct drm_printer p;
+
+ if (!vgdev->has_host_visible) {
+ seq_puts(m, "Host allocations not visible to guest\n");
+ return 0;
+ }
+
+ p = drm_seq_file_printer(m);
+ drm_mm_print(&vgdev->host_visible_mm, &p);
+ return 0;
+}
+
static struct drm_info_list virtio_gpu_debugfs_list[] = {
{ "virtio-gpu-features", virtio_gpu_features },
{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+ { "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
#endif
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
+ VIRTIO_GPU_F_RESOURCE_BLOB,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_mmap = drm_gem_prime_mmap,
- .gem_prime_export = virtgpu_gem_prime_export,
.gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
-#define UUID_INITIALIZING 0
-#define UUID_INITIALIZED 1
-#define UUID_INITIALIZATION_FAILED 2
+#define STATE_INITIALIZING 0
+#define STATE_OK 1
+#define STATE_ERR 2
struct virtio_gpu_object_params {
- uint32_t format;
- uint32_t width;
- uint32_t height;
unsigned long size;
bool dumb;
/* 3d */
bool virgl;
+ bool blob;
+
+ /* classic resources only */
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
uint32_t target;
uint32_t bind;
uint32_t depth;
uint32_t last_level;
uint32_t nr_samples;
uint32_t flags;
+
+ /* blob resources only */
+ uint32_t ctx_id;
+ uint32_t blob_mem;
+ uint32_t blob_flags;
+ uint64_t blob_id;
};
struct virtio_gpu_object {
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool host3d_blob, guest_blob;
+ uint32_t blob_mem, blob_flags;
int uuid_state;
uuid_t uuid;
uint32_t mapped;
};
+struct virtio_gpu_object_vram {
+ struct virtio_gpu_object base;
+ uint32_t map_state;
+ uint32_t map_info;
+ struct drm_mm_node vram_node;
+};
+
#define to_virtio_gpu_shmem(virtio_gpu_object) \
container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+#define to_virtio_gpu_vram(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
bool has_edid;
bool has_indirect;
bool has_resource_assign_uuid;
+ bool has_resource_blob;
+ bool has_host_visible;
+ struct virtio_shm_region host_visible_region;
+ struct drm_mm host_visible_mm;
struct work_struct config_changed_work;
uint32_t num_capsets;
struct list_head cap_cache;
- /* protects resource state when exporting */
+ /* protects uuid state when exporting */
spinlock_t resource_export_lock;
+ /* protects map state and host_visible_mm */
+ spinlock_t host_visible_lock;
};
struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 10
+/* virtgpu_ioctl.c */
+#define DRM_VIRTIO_NUM_IOCTLS 11
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs, uint64_t offset);
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo);
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_mem_entry *ents,
+ uint32_t nents);
+void
+virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id,
+ struct virtio_gpu_object *bo,
+ struct drm_framebuffer *fb,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y);
+
/* virtgpu_display.c */
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid);
/* virtgpu_prime.c */
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo);
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags);
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_vram.c */
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr);
#endif
#include "virtgpu_drv.h"
+#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
+ VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
+ VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
switch (param->param) {
case VIRTGPU_PARAM_3D_FEATURES:
- value = vgdev->has_virgl_3d == true ? 1 : 0;
+ value = vgdev->has_virgl_3d ? 1 : 0;
break;
case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
value = 1;
break;
+ case VIRTGPU_PARAM_RESOURCE_BLOB:
+ value = vgdev->has_resource_blob ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_HOST_VISIBLE:
+ value = vgdev->has_host_visible ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_CROSS_DEVICE:
+ value = vgdev->has_resource_assign_uuid ? 1 : 0;
+ break;
default:
return -EINVAL;
}
ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
+ if (qobj->host3d_blob || qobj->guest_blob)
+ ri->blob_mem = qobj->blob_mem;
+
drm_gem_object_put(gobj);
return 0;
}
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
ret = -ENOMEM;
goto err_unlock;
}
+
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, vfpriv->ctx_id, offset, args->level,
- &args->box, objs, fence);
+ (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+ args->layer_stride, &args->box, objs, fence);
dma_fence_put(&fence->f);
virtio_gpu_notify(vgdev);
return 0;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
objs, NULL);
} else {
virtio_gpu_create_context(dev, file);
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
- vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &args->box, objs, fence);
+ vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+ args->stride, args->layer_stride, &args->box, objs,
+ fence);
dma_fence_put(&fence->f);
}
virtio_gpu_notify(vgdev);
return 0;
}
+static int verify_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv,
+ struct virtio_gpu_object_params *params,
+ struct drm_virtgpu_resource_create_blob *rc_blob,
+ bool *guest_blob, bool *host3d_blob)
+{
+ if (!vgdev->has_resource_blob)
+ return -EINVAL;
+
+ if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
+ !rc_blob->blob_flags)
+ return -EINVAL;
+
+ if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ if (!vgdev->has_resource_assign_uuid)
+ return -EINVAL;
+ }
+
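+ /* GUEST blobs are backed by guest pages, HOST3D blobs by host memory
+  * (optionally mapped through the host visible region), and
+  * HOST3D_GUEST blobs by guest pages shared with a host 3D resource.
+  */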
+ switch (rc_blob->blob_mem) {
+ case VIRTGPU_BLOB_MEM_GUEST:
+ *guest_blob = true;
+ break;
+ case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
+ *guest_blob = true;
+ fallthrough;
+ case VIRTGPU_BLOB_MEM_HOST3D:
+ *host3d_blob = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*host3d_blob) {
+ if (!vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Must be dword aligned. */
+ if (rc_blob->cmd_size % 4 != 0)
+ return -EINVAL;
+
+ params->ctx_id = vfpriv->ctx_id;
+ params->blob_id = rc_blob->blob_id;
+ } else {
+ if (rc_blob->blob_id != 0)
+ return -EINVAL;
+
+ if (rc_blob->cmd_size != 0)
+ return -EINVAL;
+ }
+
+ params->blob_mem = rc_blob->blob_mem;
+ params->size = rc_blob->size;
+ params->blob = true;
+ params->blob_flags = rc_blob->blob_flags;
+ return 0;
+}
+
+static int virtio_gpu_resource_create_blob(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t handle = 0;
+ bool guest_blob = false;
+ bool host3d_blob = false;
+ struct drm_gem_object *obj;
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_params params = { 0 };
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_resource_create_blob *rc_blob = data;
+
+ if (verify_blob(vgdev, vfpriv, &params, rc_blob,
+ &guest_blob, &host3d_blob))
+ return -EINVAL;
+
+ if (vgdev->has_virgl_3d)
+ virtio_gpu_create_context(dev, file);
+
+ if (rc_blob->cmd_size) {
+ void *buf;
+
+ buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
+ rc_blob->cmd_size);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+ vfpriv->ctx_id, NULL, NULL);
+ }
+
+ if (guest_blob)
+  ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
+ else if (!guest_blob && host3d_blob)
+  ret = virtio_gpu_vram_create(vgdev, &params, &bo);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+
+ bo->guest_blob = guest_blob;
+ bo->host3d_blob = host3d_blob;
+ bo->blob_mem = rc_blob->blob_mem;
+ bo->blob_flags = rc_blob->blob_flags;
+
+ obj = &bo->base.base;
+ if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ }
+
+ ret = drm_gem_handle_create(file, obj, &handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ drm_gem_object_put(obj);
+
+ rc_blob->res_handle = bo->hw_res_handle;
+ rc_blob->bo_handle = handle;
+
+ return 0;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+ virtio_gpu_resource_create_blob,
+ DRM_RENDER_ALLOW),
};
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
+ spin_lock_init(&vgdev->host_visible_lock);
ida_init(&vgdev->ctx_id_ida);
ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
vgdev->has_resource_assign_uuid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+ vgdev->has_resource_blob = true;
+ }
+ if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
+ if (!devm_request_mem_region(&vgdev->vdev->dev,
+ vgdev->host_visible_region.addr,
+ vgdev->host_visible_region.len,
+ dev_name(&vgdev->vdev->dev))) {
+ DRM_ERROR("Could not reserve host visible region\n");
+ goto err_vqs;
+ }
+
+ DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ vgdev->has_host_visible = true;
+ drm_mm_init(&vgdev->host_visible_mm,
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ }
- DRM_INFO("features: %cvirgl %cedid\n",
- vgdev->has_virgl_3d ? '+' : '-',
- vgdev->has_edid ? '+' : '-');
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-',
+ vgdev->has_resource_blob ? '+' : '-',
+ vgdev->has_host_visible ? '+' : '-');
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
+
+ if (vgdev->has_host_visible)
+ drm_mm_takedown(&vgdev->host_visible_mm);
+
kfree(vgdev->capsets);
kfree(vgdev);
}
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
-static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
if (virtio_gpu_virglrenderer_workaround) {
/*
}
drm_gem_shmem_free_object(&bo->base.base);
+ } else if (virtio_gpu_is_vram(bo)) {
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+ spin_lock(&vgdev->host_visible_lock);
+ if (drm_mm_node_allocated(&vram->vram_node))
+ drm_mm_remove_node(&vram->vram_node);
+
+ spin_unlock(&vgdev->host_visible_lock);
+
+ drm_gem_free_mmap_offset(&vram->base.base.base);
+ drm_gem_object_release(&vram->base.base.base);
+ kfree(vram);
}
}
.close = virtio_gpu_gem_object_close,
.print_info = drm_gem_shmem_print_info,
+ .export = virtgpu_gem_prime_export,
.pin = drm_gem_shmem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
goto err_put_objs;
}
- if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
- objs, fence);
- } else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params,
- objs, fence);
- }
-
ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
- virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ if (params->blob) {
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
+ ents, nents);
+ } else if (params->virgl) {
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ } else {
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
*bo_ptr = bo;
return 0;
plane->state->src_h >> 16,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
- virtio_gpu_cmd_set_scanout(vgdev, output->index,
- bo->hw_res_handle,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
+
+ if (bo->host3d_blob || bo->guest_blob) {
+ virtio_gpu_cmd_set_scanout_blob
+ (vgdev, output->index, bo,
+ plane->state->fb,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ } else {
+ virtio_gpu_cmd_set_scanout(vgdev, output->index,
+ bo->hw_res_handle,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ }
}
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
- wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
- if (bo->uuid_state != UUID_INITIALIZED)
+ wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
+ if (bo->uuid_state != STATE_OK)
return -ENODEV;
uuid_copy(uuid, &bo->uuid);
.get_uuid = virtgpu_virtio_get_uuid,
};
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ int ret;
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+ ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags)
{
struct drm_device *dev = obj->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- struct virtio_gpu_object_array *objs;
int ret = 0;
+ bool blob = bo->host3d_blob || bo->guest_blob;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- if (vgdev->has_resource_assign_uuid) {
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return ERR_PTR(-ENOMEM);
- virtio_gpu_array_add_obj(objs, &bo->base.base);
-
- ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
- if (ret)
- return ERR_PTR(ret);
- virtio_gpu_notify(vgdev);
- } else {
- bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ if (!blob) {
+ if (vgdev->has_resource_assign_uuid) {
+ ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+ if (ret)
+ return ERR_PTR(ret);
+
+ virtio_gpu_notify(vgdev);
+ } else {
+ bo->uuid_state = STATE_ERR;
+ }
}
exp_info.ops = &virtgpu_dmabuf_ops.ops;
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
shmem->pages, DMA_TO_DEVICE);
+ }
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
uint32_t resp_type = le32_to_cpu(resp->hdr.type);
spin_lock(&vgdev->resource_export_lock);
- WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+ WARN_ON(obj->uuid_state != STATE_INITIALIZING);
if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
- obj->uuid_state == UUID_INITIALIZING) {
- memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
- obj->uuid_state = UUID_INITIALIZED;
+ obj->uuid_state == STATE_INITIALIZING) {
+ import_uuid(&obj->uuid, resp->uuid);
+ obj->uuid_state = STATE_OK;
} else {
- obj->uuid_state = UUID_INITIALIZATION_FAILED;
+ obj->uuid_state = STATE_ERR;
}
spin_unlock(&vgdev->resource_export_lock);
resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
if (!resp_buf) {
spin_lock(&vgdev->resource_export_lock);
- bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ bo->uuid_state = STATE_ERR;
spin_unlock(&vgdev->resource_export_lock);
virtio_gpu_array_put_free(objs);
return -ENOMEM;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
return 0;
}
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_map_info *resp =
+ (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->host_visible_lock);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+ vram->map_info = resp->map_info;
+ vram->map_state = STATE_OK;
+ } else {
+ vram->map_state = STATE_ERR;
+ }
+
+ spin_unlock(&vgdev->host_visible_lock);
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+ struct virtio_gpu_resource_map_blob *cmd_p;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_map_info *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ vbuf->objs = objs;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_resource_unmap_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_mem_entry *ents,
+ uint32_t nents)
+{
+ struct virtio_gpu_resource_create_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+ cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+ cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+ cmd_p->blob_id = cpu_to_le64(params->blob_id);
+ cmd_p->size = cpu_to_le64(params->size);
+ cmd_p->nr_entries = cpu_to_le32(nents);
+
+ vbuf->data_buf = ents;
+ vbuf->data_size = sizeof(*ents) * nents;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
+}
+
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id,
+ struct virtio_gpu_object *bo,
+ struct drm_framebuffer *fb,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ uint32_t i;
+ struct virtio_gpu_set_scanout_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+ cmd_p->format = cpu_to_le32(format);
+ cmd_p->width = cpu_to_le32(fb->width);
+ cmd_p->height = cpu_to_le32(fb->height);
+
+ for (i = 0; i < 4; i++) {
+ cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+ cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+ }
+
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_vram_free(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ bool unmap;
+
+ if (bo->created) {
+ spin_lock(&vgdev->host_visible_lock);
+ unmap = drm_mm_node_allocated(&vram->vram_node);
+ spin_unlock(&vgdev->host_visible_lock);
+
+ if (unmap)
+ virtio_gpu_cmd_unmap(vgdev, bo);
+
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+}
+
+static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return -EINVAL;
+
+ wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
+ if (vram->map_state != STATE_OK)
+ return -EINVAL;
+
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &virtio_gpu_vram_vm_ops;
+
+ if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Partial mappings of GEM buffers don't happen much in practice. */
+ if (vm_size != vram->vram_node.size)
+ return -EINVAL;
+
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ vram->vram_node.start >> PAGE_SHIFT,
+ vm_size, vma->vm_page_prot);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+ .free = virtio_gpu_vram_free,
+ .mmap = virtio_gpu_vram_mmap,
+};
+
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_vram_funcs;
+}
+
+static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
+{
+ int ret;
+ uint64_t offset;
+ struct virtio_gpu_object_array *objs;
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+ if (!vgdev->has_host_visible)
+ return -EINVAL;
+
+ spin_lock(&vgdev->host_visible_lock);
+ ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
+ bo->base.base.size);
+ spin_unlock(&vgdev->host_visible_lock);
+
+ if (ret)
+ return ret;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs) {
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+ /* TODO: Add an error checking helper function in drm_mm.h */
+ offset = vram->vram_node.start - vgdev->host_visible_region.addr;
+
+ ret = virtio_gpu_cmd_map(vgdev, objs, offset);
+ if (ret) {
+ virtio_gpu_array_put_free(objs);
+ goto err_remove_node;
+ }
+
+ return 0;
+
+err_remove_node:
+ spin_lock(&vgdev->host_visible_lock);
+ drm_mm_remove_node(&vram->vram_node);
+ spin_unlock(&vgdev->host_visible_lock);
+ return ret;
+}
+
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr)
+{
+ struct drm_gem_object *obj;
+ struct virtio_gpu_object_vram *vram;
+ int ret;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ obj = &vram->base.base.base;
+ obj->funcs = &virtio_gpu_vram_funcs;
+ drm_gem_private_object_init(vgdev->ddev, obj, params->size);
+
+ /* Create fake offset */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret) {
+ kfree(vram);
+ return ret;
+ }
+
+ ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
+ if (ret) {
+ kfree(vram);
+ return ret;
+ }
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
+ 0);
+ if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
+ ret = virtio_gpu_vram_map(&vram->base);
+ if (ret) {
+ virtio_gpu_vram_free(obj);
+ return ret;
+ }
+ }
+
+ *bo_ptr = &vram->base;
+ return 0;
+}
vkms_plane.o \
vkms_output.o \
vkms_crtc.o \
- vkms_gem.o \
vkms_composer.o \
vkms_writeback.o
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
void *vaddr_out)
{
struct drm_gem_object *cursor_obj;
- struct vkms_gem_object *cursor_vkms_obj;
+ struct drm_gem_shmem_object *cursor_shmem_obj;
cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
- cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
+ cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj);
- if (WARN_ON(!cursor_vkms_obj->vaddr))
+ if (WARN_ON(!cursor_shmem_obj->vaddr))
return;
- blend(vaddr_out, cursor_vkms_obj->vaddr,
+ blend(vaddr_out, cursor_shmem_obj->vaddr,
primary_composer, cursor_composer);
}
{
struct drm_framebuffer *fb = &primary_composer->fb;
struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);
if (!*vaddr_out) {
- *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+ *vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
if (!*vaddr_out) {
DRM_ERROR("Cannot allocate memory for output frame.");
return -ENOMEM;
}
}
- if (WARN_ON(!vkms_obj->vaddr))
+ if (WARN_ON(!shmem_obj->vaddr))
return -EINVAL;
- memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+ memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);
if (cursor_composer)
compose_cursor(cursor_composer, primary_composer, *vaddr_out);
}
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_off(crtc);
}
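/*
 * The atomic_enable/atomic_disable hooks now take the full atomic commit
 * state instead of the old CRTC state. A hedged sketch of how a driver that
 * still needs the old state can look it up (the zynqmp DisplayPort change
 * later in this series does exactly this); the function name and the
 * conditional are illustrative only.
 */
static void example_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* Only touch vblank handling if the CRTC was previously active. */
	if (old_crtc_state && old_crtc_state->active)
		drm_crtc_vblank_off(crtc);
}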
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
-static const struct file_operations vkms_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = drm_gem_mmap,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .release = drm_release,
-};
-
-static const struct vm_operations_struct vkms_gem_vm_ops = {
- .fault = vkms_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
+DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
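/*
 * Note: DEFINE_DRM_GEM_FOPS() provides the standard GEM file_operations
 * (drm_open, drm_release, drm_ioctl, drm_compat_ioctl, drm_poll, drm_read,
 * a default llseek and drm_gem_mmap), which is the same set that the
 * open-coded vkms_driver_fops table above supplied by hand.
 */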
static void vkms_release(struct drm_device *dev)
{
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
- .dumb_create = vkms_dumb_create,
- .gem_vm_ops = &vkms_gem_vm_ops,
- .gem_free_object_unlocked = vkms_gem_free_object,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = vkms_prime_import_sg_table,
+ .gem_create_object = drm_gem_shmem_create_object_cached,
+ DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 32;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
return vkms_output_init(vkmsdev, 0);
if (ret)
goto out_devres;
+ drm_fbdev_generic_setup(&vkms_device->drm, 0);
+
return 0;
out_devres:
struct vkms_output output;
};
-struct vkms_gem_object {
- struct drm_gem_object gem;
- struct mutex pages_lock; /* Page lock used in page fault handler */
- struct page **pages;
- unsigned int vmap_count;
- void *vaddr;
-};
-
#define drm_crtc_to_vkms_output(target) \
container_of(target, struct vkms_output, crtc)
#define drm_device_to_vkms_device(target) \
container_of(target, struct vkms_device, drm)
-#define drm_gem_to_vkms_gem(target)\
- container_of(target, struct vkms_gem_object, gem)
-
#define to_vkms_crtc_state(target)\
container_of(target, struct vkms_crtc_state, base)
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
-/* Gem stuff */
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-void vkms_gem_free_object(struct drm_gem_object *obj);
-
-int vkms_gem_vmap(struct drm_gem_object *obj);
-
-void vkms_gem_vunmap(struct drm_gem_object *obj);
-
-/* Prime */
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
-
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0+
-
-#include <linux/dma-buf.h>
-#include <linux/shmem_fs.h>
-#include <linux/vmalloc.h>
-#include <drm/drm_prime.h>
-
-#include "vkms_drv.h"
-
-static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
- u64 size)
-{
- struct vkms_gem_object *obj;
- int ret;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- size = roundup(size, PAGE_SIZE);
- ret = drm_gem_object_init(dev, &obj->gem, size);
- if (ret) {
- kfree(obj);
- return ERR_PTR(ret);
- }
-
- mutex_init(&obj->pages_lock);
-
- return obj;
-}
-
-void vkms_gem_free_object(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
- gem);
-
- WARN_ON(gem->pages);
- WARN_ON(gem->vaddr);
-
- mutex_destroy(&gem->pages_lock);
- drm_gem_object_release(obj);
- kfree(gem);
-}
-
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct vkms_gem_object *obj = vma->vm_private_data;
- unsigned long vaddr = vmf->address;
- pgoff_t page_offset;
- loff_t num_pages;
- vm_fault_t ret = VM_FAULT_SIGBUS;
-
- page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
- num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
-
- if (page_offset > num_pages)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&obj->pages_lock);
- if (obj->pages) {
- get_page(obj->pages[page_offset]);
- vmf->page = obj->pages[page_offset];
- ret = 0;
- }
- mutex_unlock(&obj->pages_lock);
- if (ret) {
- struct page *page;
- struct address_space *mapping;
-
- mapping = file_inode(obj->gem.filp)->i_mapping;
- page = shmem_read_mapping_page(mapping, page_offset);
-
- if (!IS_ERR(page)) {
- vmf->page = page;
- ret = 0;
- } else {
- switch (PTR_ERR(page)) {
- case -ENOSPC:
- case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
- case -EBUSY:
- ret = VM_FAULT_RETRY;
- break;
- case -EFAULT:
- case -EINVAL:
- ret = VM_FAULT_SIGBUS;
- break;
- default:
- WARN_ON(PTR_ERR(page));
- ret = VM_FAULT_SIGBUS;
- break;
- }
- }
- }
- return ret;
-}
-
-static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
-{
- struct vkms_gem_object *obj;
- int ret;
-
- if (!file || !dev || !handle)
- return ERR_PTR(-EINVAL);
-
- obj = __vkms_gem_create(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- ret = drm_gem_handle_create(file, &obj->gem, handle);
- if (ret)
- return ERR_PTR(ret);
-
- return &obj->gem;
-}
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gem_obj;
- u64 pitch, size;
-
- if (!args || !dev || !file)
- return -EINVAL;
-
- pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- size = pitch * args->height;
-
- if (!size)
- return -EINVAL;
-
- gem_obj = vkms_gem_create(dev, file, &args->handle, size);
- if (IS_ERR(gem_obj))
- return PTR_ERR(gem_obj);
-
- args->size = gem_obj->size;
- args->pitch = pitch;
-
- drm_gem_object_put(gem_obj);
-
- DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
-
- return 0;
-}
-
-static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
-{
- struct drm_gem_object *gem_obj = &vkms_obj->gem;
-
- if (!vkms_obj->pages) {
- struct page **pages = drm_gem_get_pages(gem_obj);
-
- if (IS_ERR(pages))
- return pages;
-
- if (cmpxchg(&vkms_obj->pages, NULL, pages))
- drm_gem_put_pages(gem_obj, pages, false, true);
- }
-
- return vkms_obj->pages;
-}
-
-void vkms_gem_vunmap(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
-
- mutex_lock(&vkms_obj->pages_lock);
- if (vkms_obj->vmap_count < 1) {
- WARN_ON(vkms_obj->vaddr);
- WARN_ON(vkms_obj->pages);
- mutex_unlock(&vkms_obj->pages_lock);
- return;
- }
-
- vkms_obj->vmap_count--;
-
- if (vkms_obj->vmap_count == 0) {
- vunmap(vkms_obj->vaddr);
- vkms_obj->vaddr = NULL;
- drm_gem_put_pages(obj, vkms_obj->pages, false, true);
- vkms_obj->pages = NULL;
- }
-
- mutex_unlock(&vkms_obj->pages_lock);
-}
-
-int vkms_gem_vmap(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
- int ret = 0;
-
- mutex_lock(&vkms_obj->pages_lock);
-
- if (!vkms_obj->vaddr) {
- unsigned int n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages = _get_pages(vkms_obj);
-
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
- }
-
- vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
- if (!vkms_obj->vaddr)
- goto err_vmap;
- }
-
- vkms_obj->vmap_count++;
- goto out;
-
-err_vmap:
- ret = -ENOMEM;
- drm_gem_put_pages(obj, vkms_obj->pages, false, true);
- vkms_obj->pages = NULL;
-out:
- mutex_unlock(&vkms_obj->pages_lock);
- return ret;
-}
-
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
-{
- struct vkms_gem_object *obj;
- int npages;
-
- obj = __vkms_gem_create(dev, attach->dmabuf->size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
- DRM_DEBUG_PRIME("Importing %d pages\n", npages);
-
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!obj->pages) {
- vkms_gem_free_object(&obj->gem);
- return ERR_PTR(-ENOMEM);
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
- return &obj->gem;
-}
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include "vkms_drv.h"
struct drm_plane_state *state)
{
struct drm_gem_object *gem_obj;
- int ret;
+ void *vaddr;
if (!state->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(state->fb, 0);
- ret = vkms_gem_vmap(gem_obj);
- if (ret)
- DRM_ERROR("vmap failed: %d\n", ret);
+ vaddr = drm_gem_shmem_vmap(gem_obj);
+ if (IS_ERR(vaddr))
+ DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
return drm_gem_fb_prepare_fb(plane, state);
}
struct drm_plane_state *old_state)
{
struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
if (!old_state->fb)
return;
gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
- vkms_gem_vunmap(gem_obj);
+ shmem_obj = to_drm_gem_shmem_obj(gem_obj);
+ drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr);
}
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_XRGB8888,
static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job)
{
- struct vkms_gem_object *vkms_obj;
struct drm_gem_object *gem_obj;
- int ret;
+ void *vaddr;
if (!job->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(job->fb, 0);
- ret = vkms_gem_vmap(gem_obj);
- if (ret) {
- DRM_ERROR("vmap failed: %d\n", ret);
- return ret;
+ vaddr = drm_gem_shmem_vmap(gem_obj);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
+ return PTR_ERR(vaddr);
}
- vkms_obj = drm_gem_to_vkms_gem(gem_obj);
- job->priv = vkms_obj->vaddr;
+ job->priv = vaddr;
return 0;
}
return;
gem_obj = drm_gem_fb_get_obj(job->fb, 0);
- vkms_gem_vunmap(gem_obj);
+ drm_gem_shmem_vunmap(gem_obj, job->priv);
vkmsdev = drm_device_to_vkms_device(gem_obj->dev);
vkms_set_composer(&vkmsdev->output, false);
int ret = 0;
/* Buffer objects need to be either pinned or reserved: */
- if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ if (!(dst->pin_count))
dma_resv_assert_held(dst->base.resv);
- if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ if (!(src->pin_count))
dma_resv_assert_held(src->base.resv);
if (!ttm_tt_is_populated(dst->ttm)) {
d.src_pages = src->ttm->pages;
d.dst_num_pages = dst->num_pages;
d.src_num_pages = src->num_pages;
- d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+ d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
if (unlikely(ret != 0))
goto err;
- if (buf->pin_count > 0)
+ if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
if (unlikely(ret != 0))
goto err;
- if (buf->pin_count > 0) {
+ if (buf->base.pin_count > 0) {
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
- buf->pin_count == 0) {
+ buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
- if (buf->pin_count > 0)
+ if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
dma_resv_assert_held(bo->base.resv);
- if (pin) {
- if (vbo->pin_count++ > 0)
- return;
- } else {
- WARN_ON(vbo->pin_count <= 0);
- if (--vbo->pin_count > 0)
- return;
- }
+ if (pin == !!bo->pin_count)
+ return;
pl.fpfn = 0;
pl.lpfn = 0;
pl.mem_type = bo->mem.mem_type;
pl.flags = bo->mem.placement;
- if (pin)
- pl.flags |= TTM_PL_FLAG_NO_EVICT;
- else
- pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
+ if (pin)
+ ttm_bo_pin(bo);
+ else
+ ttm_bo_unpin(bo);
+}
/**
* vmw_bo_map_and_cache - Map a buffer object and cache the map
ttm_prime_object_kfree(vmw_user_bo, prime);
}
+/**
+ * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @size: Size of the buffer object, in bytes
+ * @placement: Where to place the buffer object
+ * @p_bo: Receives a pointer to the created buffer object
+ *
+ * Creates and pins a simple BO for in-kernel use.
+ */
+int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
+ struct ttm_placement *placement,
+ struct ttm_buffer_object **p_bo)
+{
+ unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_buffer_object *bo;
+ size_t acc_size;
+ int ret;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (unlikely(!bo))
+ return -ENOMEM;
+
+ acc_size = ttm_round_pot(sizeof(*bo));
+ acc_size += ttm_round_pot(npages * sizeof(void *));
+ acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+ ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
+ ttm_bo_type_device, placement, 0,
+ &ctx, acc_size, NULL, NULL, NULL);
+ if (unlikely(ret))
+ goto error_free;
+
+ ttm_bo_pin(bo);
+ ttm_bo_unreserve(bo);
+ *p_bo = bo;
+
+ return 0;
+
+error_free:
+ kfree(bo);
+ return ret;
+}
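/*
 * A minimal usage sketch for vmw_bo_create_kernel(), mirroring how the
 * command-buffer manager and MOB setup paths in this series call it; the
 * PAGE_SIZE size and vmw_sys_placement placement below are illustrative
 * choices, not requirements.
 */
static int example_alloc_kernel_bo(struct vmw_private *dev_priv,
				   struct ttm_buffer_object **p_bo)
{
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE,
				   &vmw_sys_placement, p_bo);
	if (ret)
		return ret;

	/* The BO is returned pinned and unreserved, ready for kernel use. */
	return 0;
}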
/**
* vmw_bo_init - Initialize a vmw buffer object
* @size: Buffer object size in bytes.
* @placement: Initial placement.
* @interruptible: Whether waits should be performed interruptible.
+ * @pin: If the BO should be created pinned at a fixed location.
* @bo_free: The buffer object destructor.
* Returns: Zero on success, negative error code on error.
*
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interruptible,
+ bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, interruptible, acc_size,
- NULL, NULL, bo_free);
- return ret;
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, &ctx, acc_size, NULL, NULL, bo_free);
+ if (unlikely(ret))
+ return ret;
+
+ if (pin)
+ ttm_bo_pin(&vmw_bo->base);
+ ttm_bo_unreserve(&vmw_bo->base);
+ return 0;
}
ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
- &vmw_vram_sys_placement, true,
+ &vmw_vram_sys_placement, true, false,
&vmw_user_bo_destroy);
if (unlikely(ret != 0))
return ret;
{
struct vmw_buffer_object *vbo;
- if (mem == NULL)
- return;
-
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
if (bo->destroy != vmw_bo_bo_free &&
bo->destroy != vmw_user_bo_destroy)
!dev_priv->has_mob)
return -ENOMEM;
- ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
- &vmw_mob_ne_placement, 0, false,
- &man->cmd_space);
+ ret = vmw_bo_create_kernel(dev_priv, size,
+ &vmw_mob_placement,
+ &man->cmd_space);
if (ret)
return ret;
if (!buf)
return -ENOMEM;
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_bo_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
+ true, true, vmw_bo_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
return -ENOMEM;
ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_ne_placement, false,
+ &vmw_sys_placement, false, true,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
return ret;
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
}
else
dev_priv->map_mode = vmw_dma_map_populate;
- if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
- (dev_priv->map_mode == vmw_dma_alloc_coherent))
- return -EINVAL;
-
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
return 0;
}
drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- ret = ttm_bo_device_init(&dev_priv->bdev,
- &vmw_bo_driver,
+ ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
+ dev_priv->dev->dev,
dev->anon_inode->i_mapping,
&dev_priv->vma_manager,
+ dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
spin_unlock(&dev_priv->svga_lock);
- if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
/*
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all();
+ while (ttm_bo_swapout(&ctx) == 0);
if (dev_priv->enable_fb)
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
* @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @pin_count: pin depth
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
struct vmw_buffer_object {
struct ttm_buffer_object base;
struct rb_root res_tree;
- s32 pin_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
+ unsigned long size,
+ struct ttm_placement *placement,
+ struct ttm_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interruptible,
+ bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern const struct vmw_sg_table *
ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
- false,
+ false, false,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
ttm_resource_manager_cleanup(man);
* CRTC, it makes more sense to do those at plane update time.
*/
static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
* @crtc: CRTC to be turned off
*/
static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
* @start: First page of the range within the buffer object.
* @end: Last page of the range within the buffer object + 1.
*
- * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
*/
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end)
ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
- interruptible,
+ interruptible, false,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out_no_bo;
mutex_lock(&dev_priv->binding_mutex);
dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
- if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
}
vbo = res->backup;
ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
- if (!vbo->pin_count) {
+ if (!vbo->base.pin_count) {
ret = ttm_bo_validate
(&vbo->base,
res->func->backup_placement,
* This is called after a mode set has been completed.
*/
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
* @crtc: CRTC to be turned off
*/
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
*/
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_init(dev_priv, vps->bo, size,
- &vmw_vram_ne_placement,
- false, &vmw_bo_bo_free);
+ &vmw_vram_placement,
+ false, true, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret) {
vps->bo = NULL; /* vmw_bo_init frees on error */
if (unlikely(!buf))
return -ENOMEM;
- ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
- true, vmw_bo_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
+ true, true, vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out;
}
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+ ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
if (ret)
return;
spin_lock(&rman->lock);
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place vram_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place sys_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place gmr_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place mob_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
.busy_placement = &gmr_placement_flags
};
-static const struct ttm_place vram_gmr_ne_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT
- }
-};
-
-struct ttm_placement vmw_vram_gmr_ne_placement = {
- .num_placement = 2,
- .placement = vram_gmr_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &gmr_ne_placement_flags
-};
-
struct ttm_placement vmw_vram_sys_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_vram_ne_placement = {
- .num_placement = 1,
- .placement = &vram_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vram_ne_placement_flags
-};
-
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_sys_ne_placement = {
- .num_placement = 1,
- .placement = &sys_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_ne_placement_flags
-};
-
static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
.busy_placement = &mob_placement_flags
};
-struct ttm_placement vmw_mob_ne_placement = {
- .num_placement = 1,
- .num_busy_placement = 1,
- .placement = &mob_ne_placement_flags,
- .busy_placement = &mob_ne_placement_flags
-};
-
struct ttm_placement vmw_nonfixed_placement = {
.num_placement = 3,
.placement = nonfixed_placement_flags,
};
struct vmw_ttm_tt {
- struct ttm_dma_tt dma_ttm;
+ struct ttm_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
struct vmw_mob *mob;
return 0;
vsgt->mode = dev_priv->map_mode;
- vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
- vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+ vsgt->pages = vmw_tt->dma_ttm.pages;
+ vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
vsgt->sgt = &vmw_tt->sgt;
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
return &vmw_tt->vsgt;
}
struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
int ret = 0;
if (!bo_mem)
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
if (!vmw_be->bound)
return;
static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
vmw_ttm_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ttm_dma_tt_fini(&vmw_be->dma_ttm);
+ ttm_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
static int vmw_ttm_populate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct vmw_ttm_tt *vmw_tt =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
- int ret;
-
+ /* TODO: maybe completely drop this? */
if (ttm_tt_is_populated(ttm))
return 0;
- if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
- size_t size =
- ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ret = ttm_mem_global_alloc(glob, size, ctx);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
- ctx);
- if (unlikely(ret != 0))
- ttm_mem_global_free(glob, size);
- } else
- ret = ttm_pool_populate(ttm, ctx);
-
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
- dma_ttm.ttm);
- struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-
+ dma_ttm);
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
}
vmw_ttm_unmap_dma(vmw_tt);
- if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
- size_t size =
- ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-
- ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
- ttm_mem_global_free(glob, size);
- } else
- ttm_pool_unpopulate(ttm);
+ ttm_pool_free(&bdev->pool, ttm);
}
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+ ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
else
- ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+ ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
if (unlikely(ret != 0))
goto out_no_init;
- return &vmw_be->dma_ttm.ttm;
+ return &vmw_be->dma_ttm;
out_no_init:
kfree(vmw_be);
return NULL;
mem->bus.offset = (mem->start << PAGE_SHIFT) +
dev_priv->vram_start;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_cached;
break;
default:
return -EINVAL;
bool evict,
struct ttm_resource *mem)
{
+ if (!mem)
+ return;
vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
(void) ttm_bo_wait(bo, false, false);
}
+static int vmw_move(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+ int ret;
+
+ if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
+ if (ret)
+ return ret;
+ }
+
+ vmw_move_notify(bo, evict, new_mem);
+
+ if (old_man->use_tt && new_man->use_tt) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto fail;
+
+ vmw_ttm_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ } else {
+ ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ vmw_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return ret;
+}
+
+static void
+vmw_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ vmw_move_notify(bo, false, NULL);
+}
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .ttm_tt_bind = &vmw_ttm_bind,
- .ttm_tt_unbind = &vmw_ttm_unbind,
.ttm_tt_destroy = &vmw_ttm_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
- .move = NULL,
+ .move = vmw_move,
.verify_access = vmw_verify_access,
- .move_notify = vmw_move_notify,
+ .delete_mem_notify = vmw_delete_mem_notify,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
struct ttm_buffer_object *bo;
int ret;
- ret = ttm_bo_create(&dev_priv->bdev, bo_size,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &bo);
-
+ ret = vmw_bo_create_kernel(dev_priv, bo_size,
+ &vmw_sys_placement,
+ &bo);
if (unlikely(ret != 0))
return ret;
ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt);
}
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
- if (vbo->pin_count > 0)
+ if (vbo->base.pin_count > 0)
return 0;
if (validate_as_mob)
fb_cookie);
}
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
+{
+ struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+ int idx;
+
+ if (drm_dev_enter(obj->dev, &idx)) {
+ xen_drm_front_dbuf_destroy(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(obj));
+ drm_dev_exit(idx);
+ } else {
+ dbuf_free(&drm_info->front_info->dbuf_list,
+ xen_drm_front_dbuf_to_cookie(obj));
+ }
+
+ xen_drm_front_gem_free_object_unlocked(obj);
+}
+
static int xen_drm_drv_dumb_create(struct drm_file *filp,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
return ret;
}
-static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
-{
- struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
- int idx;
-
- if (drm_dev_enter(obj->dev, &idx)) {
- xen_drm_front_dbuf_destroy(drm_info->front_info,
- xen_drm_front_dbuf_to_cookie(obj));
- drm_dev_exit(idx);
- } else {
- dbuf_free(&drm_info->front_info->dbuf_list,
- xen_drm_front_dbuf_to_cookie(obj));
- }
-
- xen_drm_front_gem_free_object_unlocked(obj);
-}
-
static void xen_drm_drv_release(struct drm_device *dev)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
.mmap = xen_drm_front_gem_mmap,
};
-static const struct vm_operations_struct xen_drm_drv_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver xen_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release,
- .gem_vm_ops = &xen_drm_drv_vm_ops,
- .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
- .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
- .gem_prime_vmap = xen_drm_front_gem_prime_vmap,
- .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
.dumb_create = xen_drm_drv_dumb_create,
.fops = &xen_drm_dev_fops,
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
int conn_idx, u64 fb_cookie);
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj);
+
#endif /* __XEN_DRM_FRONT_H_ */
xen_obj->pages = NULL;
}
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
+ .free = xen_drm_front_gem_object_free,
+ .get_sg_table = xen_drm_front_gem_get_sg_table,
+ .vmap = xen_drm_front_gem_prime_vmap,
+ .vunmap = xen_drm_front_gem_prime_vunmap,
+ .vm_ops = &xen_drm_drv_vm_ops,
+};
+
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
size_t size)
{
if (!xen_obj)
return ERR_PTR(-ENOMEM);
+ xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &xen_obj->base, size);
if (ret < 0) {
kfree(xen_obj);
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
snprintf(dma_channel_name, sizeof(dma_channel_name),
"%s%u", dma_names[layer->id], i);
- dma->chan = of_dma_request_slave_channel(disp->dev->of_node,
- dma_channel_name);
+ dma->chan = dma_request_chan(disp->dev, dma_channel_name);
if (IS_ERR(dma->chan)) {
dev_err(disp->dev, "failed to request dma channel\n");
ret = PTR_ERR(dma->chan);
static void
zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_disp *disp = crtc_to_disp(crtc);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
static void
zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct zynqmp_disp *disp = crtc_to_disp(crtc);
struct drm_plane_state *old_plane_state;
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = zynqmp_dpsub_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
}
static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
}
static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
const struct zx_crtc_bits *bits = zcrtc->bits;
static int vga_switcheroo_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
mutex_lock(&vgasr_mutex);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
mutex_unlock(&vgasr_mutex);
pci_wakeup_bus(pdev->bus);
- ret = dev->bus->pm->runtime_resume(dev);
- if (ret)
- return ret;
-
- return 0;
+ return dev->bus->pm->runtime_resume(dev);
}
/**
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
- if (prot & IOMMU_CACHE)
+ /*
+ * Also Mali has its own notions of shareability wherein its Inner
+ * domain covers the cores within the GPU, and its Outer domain is
+ * "outside the GPU" (i.e. either the Inner or System domain in CPU
+ * terms, depending on coherency).
+ */
+ if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
pte |= ARM_LPAE_PTE_SH_IS;
else
pte |= ARM_LPAE_PTE_SH_OS;
cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ if (cfg->coherent_walk)
+ cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
+
return &data->iop;
out_free_data:
static void *vb2_dc_vaddr(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf_map map;
+ int ret;
- if (!buf->vaddr && buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ if (!buf->vaddr && buf->db_attach) {
+ ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = ret ? NULL : map.vaddr;
+ }
return buf->vaddr;
}
return 0;
}
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr;
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
{
struct vb2_dc_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n");
}
if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
+ struct dma_buf_map map;
+ int ret;
BUG_ON(!buf);
if (!buf->vaddr) {
- if (buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
- else
+ if (buf->db_attach) {
+ ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = ret ? NULL : map.vaddr;
+ } else {
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+ }
}
/* add offset in case userptr is not page-aligned */
return 0;
}
-static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
- return vb2_dma_sg_vaddr(buf);
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n");
}
if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
vb2_vmalloc_put(dbuf->priv);
}
-static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_vmalloc_buf *buf = dbuf->priv;
- return buf->vaddr;
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map;
+ int ret;
- buf->vaddr = dma_buf_vmap(buf->dbuf);
+ ret = dma_buf_vmap(buf->dbuf, &map);
+ if (ret)
+ return -EFAULT;
+ buf->vaddr = map.vaddr;
- return buf->vaddr ? 0 : -EFAULT;
+ return 0;
}
static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ dma_buf_vunmap(buf->dbuf, &map);
buf->vaddr = NULL;
}
static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (buf->vaddr)
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ dma_buf_vunmap(buf->dbuf, &map);
kfree(buf);
}
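/*
 * The common importer pattern after the dma_buf_map conversion, sketched for
 * an arbitrary caller (the videobuf2 helpers above all follow it):
 * dma_buf_vmap() now fills a struct dma_buf_map and returns an error code,
 * and the same mapping must be handed back to dma_buf_vunmap(). Function
 * names here are illustrative.
 */
static void *example_importer_vmap(struct dma_buf *dbuf)
{
	struct dma_buf_map map;

	if (dma_buf_vmap(dbuf, &map))
		return NULL;
	return map.vaddr;
}

static void example_importer_vunmap(struct dma_buf *dbuf, void *vaddr)
{
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dbuf, &map);
}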
kfree(a);
}
-static void *fastrpc_vmap(struct dma_buf *dmabuf)
+static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct fastrpc_buf *buf = dmabuf->priv;
- return buf->virt;
+ dma_buf_map_set_vaddr(map, buf->virt);
+
+ return 0;
}
static int fastrpc_mmap(struct dma_buf *dmabuf,
if (!fbfont)
return NULL;
- pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+ pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
static int inverse;
-extern int fontheight_8x8;
-extern int fontwidth_8x8;
-extern unsigned char fontdata_8x8[];
-
-extern int fontheight_8x16;
-extern int fontwidth_8x16;
-extern unsigned char fontdata_8x16[];
-
/*
* struct fb_ops {
* * open/release and usage marking
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
{
int yres;
/* memory size in pixels */
- unsigned pixels = info->screen_size * 8 / var->bits_per_pixel;
+ unsigned int pixels;
struct cirrusfb_info *cinfo = info->par;
switch (var->bits_per_pixel) {
return -EINVAL;
}
+ pixels = info->screen_size * 8 / var->bits_per_pixel;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
/* use highest possible virtual resolution */
static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
- struct fb_cmap_user __user *cmap;
- struct fb_cmap32 __user *cmap32;
- __u32 data;
- int err;
-
- cmap = compat_alloc_user_space(sizeof(*cmap));
- cmap32 = compat_ptr(arg);
+ struct fb_cmap32 cmap32;
+ struct fb_cmap cmap_from;
+ struct fb_cmap_user cmap;
- if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+ if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32)))
return -EFAULT;
- if (get_user(data, &cmap32->red) ||
- put_user(compat_ptr(data), &cmap->red) ||
- get_user(data, &cmap32->green) ||
- put_user(compat_ptr(data), &cmap->green) ||
- get_user(data, &cmap32->blue) ||
- put_user(compat_ptr(data), &cmap->blue) ||
- get_user(data, &cmap32->transp) ||
- put_user(compat_ptr(data), &cmap->transp))
- return -EFAULT;
+ cmap = (struct fb_cmap_user) {
+ .start = cmap32.start,
+ .len = cmap32.len,
+ .red = compat_ptr(cmap32.red),
+ .green = compat_ptr(cmap32.green),
+ .blue = compat_ptr(cmap32.blue),
+ .transp = compat_ptr(cmap32.transp),
+ };
- err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
+ if (cmd == FBIOPUTCMAP)
+ return fb_set_user_cmap(&cmap, info);
- if (!err) {
- if (copy_in_user(&cmap32->start,
- &cmap->start,
- 2 * sizeof(__u32)))
- err = -EFAULT;
- }
- return err;
+ lock_fb_info(info);
+ cmap_from = info->cmap;
+ unlock_fb_info(info);
+
+ return fb_cmap_to_user(&cmap_from, &cmap);
}
static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
static int fsl_diu_release(struct fb_info *info, int user)
{
struct mfb_info *mfbi = info->par;
- int res = 0;
spin_lock(&diu_lock);
mfbi->count--;
}
spin_unlock(&diu_lock);
- return res;
+ return 0;
}
static const struct fb_ops fsl_diu_ops = {
struct matrox_fb_info* minfo;
list_add(&drv->node, &matroxfb_driver_list);
- for (minfo = matroxfb_l(matroxfb_list.next);
- minfo != matroxfb_l(&matroxfb_list);
- minfo = matroxfb_l(minfo->next_fb.next)) {
+ list_for_each_entry(minfo, &matroxfb_list, next_fb) {
void* p;
if (minfo->drivers_count == MATROXFB_MAX_FB_DRIVERS)
struct matrox_fb_info* minfo;
list_del(&drv->node);
- for (minfo = matroxfb_l(matroxfb_list.next);
- minfo != matroxfb_l(&matroxfb_list);
- minfo = matroxfb_l(minfo->next_fb.next)) {
+ list_for_each_entry(minfo, &matroxfb_list, next_fb) {
int i;
for (i = 0; i < minfo->drivers_count; ) {
struct device *dev = mx3fb->dev;
struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
const char *name = mx3fb_pdata->name;
- unsigned int irq;
struct fb_info *fbi;
struct mx3fb_info *mx3fbi;
const struct fb_videomode *mode;
}
ichan->client = mx3fb;
- irq = ichan->eof_irq;
if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
return -EINVAL;
const char *pname;
int len;
- for (dp = NULL;
- (dp = of_get_next_child(parent, dp)) != NULL;) {
+ for_each_child_of_node(parent, dp) {
pname = of_get_property(dp, "name", NULL);
if (!pname)
continue;
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
dev_dbg(ddata->dev, "connect\n");
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.atv->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.atv->connect(in, dssdev);
}
static void tvc_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dvi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dvi->connect(in, dssdev);
}
static void dvic_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
dev_dbg(ddata->dev, "connect\n");
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.hdmi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.hdmi->connect(in, dssdev);
}
static void hdmic_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.sdi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.sdi->connect(in, dssdev);
}
static void acx565akm_disconnect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE mem resource\n");
- return -EINVAL;
- }
-
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base)) {
DSSERR("can't ioremap CORE\n");
return PTR_ERR(core->base);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE IORESOURCE_MEM HDMI\n");
- return -EINVAL;
- }
-
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base)) {
DSSERR("can't ioremap HDMI core\n");
return PTR_ERR(core->base);
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
{
- struct resource *res;
-
phy_feat = hdmi_phy_get_features();
if (!phy_feat)
return -ENODEV;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (!res) {
- DSSERR("can't get PHY mem resource\n");
- return -EINVAL;
- }
-
- phy->base = devm_ioremap_resource(&pdev->dev, res);
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(phy->base)) {
DSSERR("can't ioremap TX PHY\n");
return PTR_ERR(phy->base);
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
- u16 r = 0;
dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
- r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- return 0;
+ return hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
}
static void hdmi_pll_disable(struct dss_pll *dsspll)
struct hdmi_wp_data *wp)
{
int r;
- struct resource *res;
pll->wp = wp;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
- if (!res) {
- DSSERR("can't get PLL mem resource\n");
- return -EINVAL;
- }
-
- pll->base = devm_ioremap_resource(&pdev->dev, res);
+ pll->base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(pll->base)) {
DSSERR("can't ioremap PLLCTRL\n");
return PTR_ERR(pll->base);
static int venc_runtime_suspend(struct device *dev)
{
- if (venc.tv_dac_clk)
- clk_disable_unprepare(venc.tv_dac_clk);
+ clk_disable_unprepare(venc.tv_dac_clk);
dispc_runtime_put();
if (r < 0)
return r;
- if (venc.tv_dac_clk)
- clk_prepare_enable(venc.tv_dac_clk);
+ clk_prepare_enable(venc.tv_dac_clk);
return 0;
}
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
const char * const clkin_name[] = { "video1_clk", "video2_clk" };
- struct resource *res;
struct dss_video_pll *vpll;
void __iomem *pll_base, *clkctrl_base;
struct clk *clk;
/* PLL CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
- pll_base = devm_ioremap_resource(&pdev->dev, res);
+ pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]);
if (IS_ERR(pll_base)) {
dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id);
return ERR_CAST(pll_base);
/* CLOCK CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- clkctrl_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
- clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]);
if (IS_ERR(clkctrl_base)) {
dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id);
return ERR_CAST(clkctrl_base);
EXPORT_SYMBOL(sbusfb_ioctl_helper);
#ifdef CONFIG_COMPAT
-static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
-{
- struct fbcmap32 __user *argp = (void __user *)arg;
- struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
- u32 addr;
- int ret;
-
- ret = copy_in_user(p, argp, 2 * sizeof(int));
- ret |= get_user(addr, &argp->red);
- ret |= put_user(compat_ptr(addr), &p->red);
- ret |= get_user(addr, &argp->green);
- ret |= put_user(compat_ptr(addr), &p->green);
- ret |= get_user(addr, &argp->blue);
- ret |= put_user(compat_ptr(addr), &p->blue);
- if (ret)
- return -EFAULT;
- return info->fbops->fb_ioctl(info,
- (cmd == FBIOPUTCMAP32) ?
- FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC,
- (unsigned long)p);
-}
-
-static int fbiogscursor(struct fb_info *info, unsigned long arg)
-{
- struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
- struct fbcursor32 __user *argp = (void __user *)arg;
- compat_uptr_t addr;
- int ret;
-
- ret = copy_in_user(p, argp,
- 2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
- ret |= get_user(addr, &argp->cmap.red);
- ret |= put_user(compat_ptr(addr), &p->cmap.red);
- ret |= get_user(addr, &argp->cmap.green);
- ret |= put_user(compat_ptr(addr), &p->cmap.green);
- ret |= get_user(addr, &argp->cmap.blue);
- ret |= put_user(compat_ptr(addr), &p->cmap.blue);
- ret |= get_user(addr, &argp->mask);
- ret |= put_user(compat_ptr(addr), &p->mask);
- ret |= get_user(addr, &argp->image);
- ret |= put_user(compat_ptr(addr), &p->image);
- if (ret)
- return -EFAULT;
- return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
-}
-
int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FBIOGATTR:
case FBIOSVIDEO:
case FBIOGVIDEO:
+ case FBIOSCURSOR32:
case FBIOGCURSOR32: /* This is not implemented yet.
Later it should be converted... */
case FBIOSCURPOS:
case FBIOGCURMAX:
return info->fbops->fb_ioctl(info, cmd, arg);
case FBIOPUTCMAP32:
- return fbiogetputcmap(info, cmd, arg);
- case FBIOGETCMAP32:
- return fbiogetputcmap(info, cmd, arg);
- case FBIOSCURSOR32:
- return fbiogscursor(info, arg);
+ case FBIOPUTCMAP_SPARC: {
+ struct fbcmap32 c;
+ struct fb_cmap cmap;
+ u16 red, green, blue;
+ u8 red8, green8, blue8;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
+ unsigned int i;
+
+ if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+ return -EFAULT;
+ ured = compat_ptr(c.red);
+ ugreen = compat_ptr(c.green);
+ ublue = compat_ptr(c.blue);
+
+ cmap.len = 1;
+ cmap.red = &red;
+ cmap.green = &green;
+ cmap.blue = &blue;
+ cmap.transp = NULL;
+ for (i = 0; i < c.count; i++) {
+ int err;
+
+ if (get_user(red8, &ured[i]) ||
+ get_user(green8, &ugreen[i]) ||
+ get_user(blue8, &ublue[i]))
+ return -EFAULT;
+
+ red = red8 << 8;
+ green = green8 << 8;
+ blue = blue8 << 8;
+
+ cmap.start = c.index + i;
+ err = fb_set_cmap(&cmap, info);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+ case FBIOGETCMAP32: {
+ struct fbcmap32 c;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
+ struct fb_cmap *cmap = &info->cmap;
+ unsigned int index, i;
+ u8 red, green, blue;
+
+ if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+ return -EFAULT;
+ index = c.index;
+ ured = compat_ptr(c.red);
+ ugreen = compat_ptr(c.green);
+ ublue = compat_ptr(c.blue);
+
+ if (index > cmap->len || c.count > cmap->len - index)
+ return -EINVAL;
+
+ for (i = 0; i < c.count; i++) {
+ red = cmap->red[index + i] >> 8;
+ green = cmap->green[index + i] >> 8;
+ blue = cmap->blue[index + i] >> 8;
+ if (put_user(red, &ured[i]) ||
+ put_user(green, &ugreen[i]) ||
+ put_user(blue, &ublue[i]))
+ return -EFAULT;
+ }
+ return 0;
+ }
default:
return -ENOIOCTLCMD;
}
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/fbcon.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
- if (priv->dot_clk)
- clk_prepare_enable(priv->dot_clk);
+ clk_prepare_enable(priv->dot_clk);
pm_runtime_get_sync(priv->dev);
}
}
{
if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
pm_runtime_put(priv->dev);
- if (priv->dot_clk)
- clk_disable_unprepare(priv->dot_clk);
+ clk_disable_unprepare(priv->dot_clk);
}
}
static const unsigned char SiS300_CHTVVCLKONTSC[] = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
-static const unsigned char SiS300_CHTVVCLKSONTSC[] = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
-
static const unsigned char SiS300_CHTVVCLKUPAL[] = { 0x2f,0x2f,0x2f,0x2f,0x2f,0x31 };
static const unsigned char SiS300_CHTVVCLKOPAL[] = { 0x2f,0x2f,0x2f,0x2f,0x30,0x32 };
#define SiS300Idle \
{ \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
CmdQueLen = MMIO_IN16(ivideo->mmio_vbase, 0x8240); \
}
/* (do three times, because 2D engine seems quite unsure about whether or not it's idle) */
#define SiS310Idle \
{ \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
CmdQueLen = 0; \
}
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
* drm_atomic_crtc_needs_modeset().
*/
bool allow_modeset : 1;
+ /**
+ * @legacy_cursor_update:
+ *
+ * Hint to enforce legacy cursor IOCTL semantics.
+ *
+ * WARNING: This is thoroughly broken and pretty much impossible to
+ * implement correctly. Drivers must ignore this and should instead
+ * implement &drm_plane_helper_funcs.atomic_async_check and
+ * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this
+ * flag are not allowed.
+ */
bool legacy_cursor_update : 1;
bool async_update : 1;
/**
#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
-/* AUX CH addresses */
-/* DPCD */
+/* DPCD Field Address Mapping */
+
+/* Receiver Capability */
#define DP_DPCD_REV 0x000
# define DP_DPCD_REV_10 0x10
# define DP_DPCD_REV_11 0x11
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
+# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
# define DP_TPS4_SUPPORTED (1 << 7)
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
# define DP_CAP_ANSI_8B10B (1 << 0)
+# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */
+# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0)
+# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1)
+# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2)
+
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */
#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
-/* link configuration */
+/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
# define DP_LINK_BW_8_1 0x1e /* 1.4 */
+# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */
+
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
+# define DP_SET_ANSI_128B132B (1 << 1)
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
# define DP_LINK_QUAL_PATTERN_PRBS7 3
# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
-# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
-# define DP_LINK_QUAL_PATTERN_MASK 7
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7
+/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18
+# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20
+# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28
+# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30
+# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
+# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
+# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
+/* Link/Sink Device Status */
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
-
-#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+/* DP 2.0 128b/132b Link Layer */
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0)
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4)
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
+
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
/* up to ID_SLOT_63 at 0x2ff */
+/* Source Device-specific */
#define DP_SOURCE_OUI 0x300
+
+/* Sink Device-specific */
#define DP_SINK_OUI 0x400
+
+/* Branch Device-specific */
#define DP_BRANCH_OUI 0x500
#define DP_BRANCH_ID 0x503
#define DP_BRANCH_REVISION_START 0x509
#define DP_BRANCH_HW_REV 0x509
#define DP_BRANCH_SW_REV 0x50A
+/* Link/Sink Device Power Control */
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
# define DP_SET_POWER_D3_AUX_ON 0x5
+/* eDP-specific */
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
# define DP_EDP_12 0x01
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+/* Sideband MSG Buffers */
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
+/* DPRX Event Status Indicator */
#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
/* 0-5 sink count */
# define DP_SINK_COUNT_CP_READY (1 << 6)
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
-#define DP_DP13_MAX_LINK_RATE 0x2201
#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
+# define DP_UHBR10 (1 << 0)
+# define DP_UHBR20 (1 << 1)
+# define DP_UHBR13_5 (1 << 2)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+
+/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
# define DP_CEC_TUNNELING_CAPABLE (1 << 0)
#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */
# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */
+/* HDCP 1.3 and HDCP 2.2 */
#define DP_AUX_HDCP_BKSV 0x68000
#define DP_AUX_HDCP_RI_PRIME 0x68005
#define DP_AUX_HDCP_AKSV 0x68007
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
-/* Link Training (LT)-tunable PHY Repeaters */
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
struct drm_gem_object;
struct drm_master;
struct drm_minor;
+struct dma_buf;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;
+struct sg_table;
/**
* enum drm_driver_feature - feature flags
*/
void (*debugfs_init)(struct drm_minor *minor);
- /**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- /**
- * @gem_open_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.open.
- *
- * Driver hook called upon gem handle creation
- */
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * @gem_close_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.close.
- *
- * Driver hook called upon gem handle release
- */
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
/**
* @gem_create_object: constructor for gem objects
*
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
+
/**
* @prime_handle_to_fd:
*
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
- /**
- * @gem_prime_export:
- *
- * Export hook for GEM drivers. Deprecated in favour of
- * &drm_gem_object_funcs.export.
- */
- struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
- int flags);
+
/**
* @gem_prime_import:
*
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
-
- /**
- * @gem_prime_pin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.pin.
- */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_unpin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
- */
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
-
-
- /**
- * @gem_prime_get_sg_table:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
- */
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
-
/**
* @gem_prime_import_sg_table:
*
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_vmap:
- *
- * Deprecated vmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vmap instead.
- */
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_vunmap:
- *
- * Deprecated vunmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vunmap instead.
- */
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
-
/**
* @gem_prime_mmap:
*
struct drm_device *dev,
uint32_t handle);
- /**
- * @gem_vm_ops: Driver private ops for this object
- *
- * For GEM drivers this is deprecated in favour of
- * &drm_gem_object_funcs.vm_ops.
- */
- const struct vm_operations_struct *gem_vm_ops;
-
/** @major: driver major number */
int major;
/** @minor: driver minor number */
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The &drm_driver.gem_free_object_unlocked callback is responsible for
+ * The &drm_gem_object_funcs.free callback is responsible for
* cleaning up the dma_buf attachment and references acquired at import
* time.
*
* @placement: TTM placement information. Supported placements are \
%TTM_PL_VRAM and %TTM_PL_SYSTEM
* @placements: TTM placement information.
- * @pin_count: Pin counter
*
* The type struct drm_gem_vram_object represents a GEM object that is
* backed by VRAM. It can be used for simple framebuffer devices with
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
-
- int pin_count;
};
/**
* actual modifier used if the request doesn't have it specified,
* ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
*
+ * IMPORTANT: These implied modifiers for legacy userspace must be
+ * stored in struct &drm_framebuffer, including all relevant metadata
+ * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the
+ * modifier enables additional planes beyond the fourcc pixel format
+ * code. This is required by the GETFB2 ioctl.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
* @allow_fb_modifiers:
*
* Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ *
+ * IMPORTANT:
+ *
+ * If this is set the driver must fill out the full implicit modifier
+ * information in their &drm_mode_config_funcs.fb_create hook for legacy
+ * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is
+ * broken for modifier aware userspace.
*/
bool allow_fb_modifiers;
* @atomic_enable must be the inverse of @atomic_disable for atomic
* drivers.
*
- * Drivers can use the @old_crtc_state input parameter if the operations
- * needed to enable the CRTC don't depend solely on the new state but
- * also on the transition between the old state and the new state.
- *
* This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
* need to implement it if there's no need to disable anything at the
* CRTC level.
*
- * Comparing to @disable, this one provides the additional input
- * parameter @old_crtc_state which could be used to access the old
- * state. Atomic drivers should consider to use this one instead
- * of @disable.
- *
* This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
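For illustration, a driver hook adapted to the new signature might look like the sketch below; drm_atomic_get_new_crtc_state() is the assumed accessor for the per-CRTC state, and the foo_ names are hypothetical.

    static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_atomic_state *state)
    {
            struct drm_crtc_state *new_state =
                    drm_atomic_get_new_crtc_state(state, crtc);

            /* program the hardware from new_state (mode, active, ...) */
    }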
/**
* @get_scanout_position:
struct dma_buf_export_info;
struct dma_buf;
struct dma_buf_attachment;
+struct dma_buf_map;
enum dma_data_direction;
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir);
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @deleted: True if the object is only a zombie and already deleted.
*/
struct ttm_resource mem;
- struct file *persistent_swap_storage;
struct ttm_tt *ttm;
bool deleted;
struct dma_fence *moving;
unsigned priority;
+ unsigned pin_count;
/**
* Special members that are protected by the reserve lock
*/
int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
+static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+}
+
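As a rough usage sketch (the buffer object and calling function are assumed to exist elsewhere), the helper simply forwards the context's settings to ttm_bo_wait():

    struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };
    int ret;

    ret = ttm_bo_wait_ctx(bo, &ctx);
    if (ret)
            return ret;     /* e.g. -ERESTARTSYS when interrupted */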
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
-/**
- * ttm_bo_create
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible,
- struct ttm_buffer_object **p_bo);
-
-/**
- * ttm_bo_evict_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Evicts all buffers on the lru list of the memory type.
- * This is normally part of a VT switch or an
- * out-of-memory-space-due-to-fragmentation handler.
- * The caller must make sure that there are no other processes
- * currently validating buffers, and can do that by taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTARTSYS: The call was interrupted by a signal while waiting to
- * evict a buffer.
- */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
/**
* ttm_kmap_obj_virtual
*
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_bo_global *glob,
- struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(void);
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
return bo->base.dev != NULL;
}
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted any more during memory pressure.
+ */
+static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ ++bo->pin_count;
+}
+
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!bo->pin_count);
+ --bo->pin_count;
+}
+
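A minimal sketch of the intended pin/unpin usage, assuming the driver already holds a reference to the buffer object; the reservation lock must be held around both calls, as enforced by the dma_resv_assert_held() checks above:

    ret = ttm_bo_reserve(bo, true, false, NULL);
    if (ret)
            return ret;
    ttm_bo_pin(bo);         /* bo can no longer be evicted */
    /* ... set up scanout, GPU access, etc. ... */
    ttm_bo_unpin(bo);
    ttm_bo_unreserve(bo);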
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
+#include "ttm_pool.h"
/**
* struct ttm_bo_driver
*/
void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
- /**
- * ttm_tt_bind
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_resource describing the
- * memory type and location for binding.
- *
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
-
- /**
- * ttm_tt_unbind
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
/**
* ttm_tt_destroy
*
struct file *filp);
/**
- * Hook to notify driver about a driver move so it
- * can do tiling things and book-keeping.
- *
- * @evict: whether this move is evicting the buffer from the graphics
- * address space
+ * Hook to notify driver about a resource delete.
*/
- void (*move_notify)(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem);
- /* notify the driver we are taking a fault on this BO
- * and have reserved it */
- int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
/**
* notify the driver that we're about to swap out this bo
* Protected by internal locks.
*/
struct drm_vma_offset_manager *vma_manager;
+ struct ttm_pool pool;
/*
* Protected by the global:lru lock.
struct delayed_work wq;
- bool need_dma32;
-
bool no_retry;
};
* @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
* @mapping: The address space to use for this bo.
* @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
+ * @use_dma_alloc: If coherent DMA allocation API should be used.
+ * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
*
* Initializes a struct ttm_bo_device:
* Returns:
*/
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
+ struct device *dev,
struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool need_dma32);
+ bool use_dma_alloc, bool use_dma32);
/**
* ttm_bo_unmap_virtual
*/
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
/**
* ttm_bo_reserve:
*
/*
* ttm_bo_util.c
*/
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);
/**
* ttm_bo_move_memcpy
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem);
-/**
- * ttm_bo_free_old_node
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Utility function to free an old placement after a successful move.
- */
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
-
/**
* ttm_bo_move_accel_cleanup.
*
/**
* ttm_io_prot
*
- * @c_state: Caching state.
+ * @bo: ttm buffer object
+ * @res: ttm resource object
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
* setting up a PTE with the caching model indicated by @c_state.
*/
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
/**
* ttm_bo_tt_bind
*/
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
-/**
- * ttm_bo_tt_bind
- *
- * Unbind the object tt from a memory resource.
- */
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
-
/**
* ttm_bo_tt_destroy.
*/
--- /dev/null
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_CACHING_H_
+#define _TTM_CACHING_H_
+
+#define TTM_NUM_CACHING_TYPES 3
+
+enum ttm_caching {
+ ttm_uncached,
+ ttm_write_combined,
+ ttm_cached
+};
+
+#endif
+++ /dev/null
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
- struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
- struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
- unsigned max_pages)
-{
- return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- return 0;
-}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
-{
-}
-#endif
-
-#endif
#define TTM_PL_PRIV 3
/*
- * Other flags that affects data placement.
- * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
- * if available.
- * TTM_PL_FLAG_SHARED means that another application may
- * reference the buffer.
- * TTM_PL_FLAG_NO_EVICT means that the buffer may never
- * be evicted to make room for other buffers.
* TTM_PL_FLAG_TOPDOWN requests to be placed from the
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CACHED (1 << 16)
-#define TTM_PL_FLAG_UNCACHED (1 << 17)
-#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
-#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
- TTM_PL_FLAG_UNCACHED | \
- TTM_PL_FLAG_WC)
-
/**
* struct ttm_place
*
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+ struct ttm_pool *pool;
+ unsigned int order;
+ enum ttm_caching caching;
+
+ struct list_head shrinker_list;
+
+ spinlock_t lock;
+ struct list_head pages;
+};
+
+/**
+ * ttm_pool - Pool for all caching and orders
+ *
+ * @dev: the device we allocate pages for
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+ struct device *dev;
+
+ bool use_dma_alloc;
+ bool use_dma32;
+
+ struct {
+ struct ttm_pool_type orders[MAX_ORDER];
+ } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
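To show how the pieces above fit together, a sketch of the pool lifecycle, assuming bdev, tt and ctx are set up elsewhere (bdev->pool being the per-device pool embedded in struct ttm_bo_device):

    ttm_pool_init(&bdev->pool, dev, true /* use_dma_alloc */, false /* use_dma32 */);

    ret = ttm_pool_alloc(&bdev->pool, tt, &ctx);    /* back a ttm_tt with pages */
    if (ret)
            return ret;
    /* ... */
    ttm_pool_free(&bdev->pool, tt);                 /* return the pages to the pool */
    ttm_pool_fini(&bdev->pool);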
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
+#include <drm/ttm/ttm_caching.h>
#define TTM_MAX_BO_PRIORITY 4U
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
- void *addr;
- phys_addr_t offset;
- bool is_iomem;
+ void *addr;
+ phys_addr_t offset;
+ bool is_iomem;
+ enum ttm_caching caching;
};
/**
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
- struct ttm_resource_manager *man);
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
+++ /dev/null
-/**************************************************************************
- *
- * Copyright (c) 2018 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Huang Rui <ray.huang@amd.com>
- */
-
-#ifndef TTM_SET_MEMORY
-#define TTM_SET_MEMORY
-
-#include <linux/mm.h>
-
-#ifdef CONFIG_X86
-
-#include <asm/set_memory.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return set_pages_array_wb(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return set_pages_array_wc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return set_pages_array_uc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return set_pages_wb(page, numpages);
-}
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- unsigned long addr = (unsigned long)page_address(page);
-
- return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return set_pages_uc(page, numpages);
-}
-
-#else /* for CONFIG_X86 */
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#include <asm/agp.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
- return 0;
-}
-
-#else /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_X86 */
-
-#endif
#define _TTM_TT_H_
#include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
/**
* struct ttm_tt
*
* @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
* @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
* @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @pages_list: used by some page allocation backend
+ * @caching: The current caching state of the pages.
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
struct ttm_tt {
struct page **pages;
uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
+ uint32_t num_pages;
+ struct sg_table *sg;
+ dma_addr_t *dma_address;
struct file *swap_storage;
- enum ttm_caching_state caching_state;
+ enum ttm_caching caching;
};
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
}
-static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
-{
- tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-static inline void ttm_tt_set_populated(struct ttm_tt *tt)
-{
- tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
-};
-
/**
* ttm_tt_create
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
+ uint32_t page_flags, enum ttm_caching caching);
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
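For reference, a hypothetical driver backend creating a cached ttm_tt with the new caching argument (sketch only; kzalloc()/kfree() from <linux/slab.h> are assumed):

    static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
                                            uint32_t page_flags)
    {
            struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

            if (!tt)
                    return NULL;
            if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                    kfree(tt);
                    return NULL;
            }
            return tt;
    }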
/**
* ttm_tt_fini
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_destroy:
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_populate - allocate pages for a ttm
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer to dma-buf-mapped memory, plus helpers.
+ */
+
+#ifndef __DMA_BUF_MAP_H__
+#define __DMA_BUF_MAP_H__
+
+#include <linux/io.h>
+
+/**
+ * DOC: overview
+ *
+ * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
+ * Depending on the location of the buffer, users may have to access it with
+ * I/O operations or memory load/store operations. For example, copying to
+ * system memory could be done with memcpy(), copying to I/O memory would be
+ * done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * When using dma-buf's vmap operation, the returned pointer is encoded as
+ * :c:type:`struct dma_buf_map <dma_buf_map>`.
+ * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
+ * system or I/O memory and a flag that signals the required method of
+ * accessing the buffer. Use the returned instance and the helper functions
+ * to access the buffer's memory in the correct way.
+ *
+ * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
+ * considered bad style. Rather than accessing its fields directly, use one
+ * of the provided helper functions, or implement your own. For example,
+ * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
+ * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
+ * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * Test if a mapping is valid with either dma_buf_map_is_set() or
+ * dma_buf_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
+ * for equality with dma_buf_map_is_equal(). Mappings that point to different
+ * memory spaces, system or I/O, are never equal. That's even true if both
+ * spaces are located in the same address space, both mappings contain the
+ * same address value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map sys_map; // refers to system memory
+ * struct dma_buf_map io_map; // refers to I/O memory
+ *
+ * if (dma_buf_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * Instances of struct dma_buf_map do not have to be cleaned up, but
+ * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
+ * actually independent from the dma-buf infrastructure. When sharing buffers
+ * among devices, drivers have to know the location of the memory to access
+ * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
+ * solves this problem for dma-buf and its users. If other drivers or
+ * sub-systems require similar functionality, the type could be generalized
+ * and moved to a more prominent header file.
+ */
+
+/**
+ * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the dma-buf memory is located in I/O
+ * memory, or false otherwise.
+ */
+struct dma_buf_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
+ * @vaddr: A system-memory address
+ */
+#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
+ * @map: The dma-buf mapping structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
+ * @lhs: The dma-buf mapping structure
+ * @rhs: A dma-buf mapping structure to compare with
+ *
+ * Two dma-buf mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
+ const struct dma_buf_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
+{
+ return !dma_buf_map_is_null(map);
+}
+
+/**
+ * dma_buf_map_clear - Clears a dma-buf mapping structure
+ * @map: The dma-buf mapping structure
+ *
+ * Clears all fields to zero, including struct dma_buf_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void dma_buf_map_clear(struct dma_buf_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+#endif /* __DMA_BUF_MAP_H__ */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
};
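+
+/*
+ * Illustrative sketch only (not part of this header; my_vmap, my_buffer and
+ * kernel_vaddr are hypothetical names): an exporter whose buffer already
+ * lives in system memory can implement the reworked callback by filling the
+ * caller-provided map with dma_buf_map_set_vaddr() and returning 0.
+ *
+ *	static int my_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+ *	{
+ *		struct my_buffer *buf = dmabuf->priv;
+ *
+ *		dma_buf_map_set_vaddr(map, buf->kernel_vaddr);
+ *		return 0;
+ *	}
+ */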
/**
const struct dma_buf_ops *ops;
struct mutex lock;
unsigned vmapping_counter;
- void *vmap_ptr;
+ struct dma_buf_map vmap_ptr;
const char *exp_name;
const char *name;
spinlock_t name_lock;
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
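+
+/*
+ * Illustrative sketch only (dmabuf, dst and len are hypothetical): importers
+ * now receive the mapping through a struct dma_buf_map instead of a raw
+ * pointer and should consult is_iomem, or use the dma_buf_map helpers,
+ * before dereferencing it.
+ *
+ *	struct dma_buf_map map;
+ *	int ret;
+ *
+ *	ret = dma_buf_vmap(dmabuf, &map);
+ *	if (ret)
+ *		return ret;
+ *
+ *	if (!map.is_iomem)
+ *		memcpy(dst, map.vaddr, len);
+ *
+ *	dma_buf_vunmap(dmabuf, &map);
+ */
+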
#endif /* __DMA_BUF_H__ */
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
const void *data;
int pref;
};
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
#include <drm/drm_mode.h>
enum shmob_drm_clk_source {
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to know or reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
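+
+/*
+ * One way to read the AXBXGXRX106106106106 layout above (a sketch, not part
+ * of the UAPI; px is a hypothetical little-endian 64-bit pixel value): each
+ * component sits in the top 10 bits of its 16-bit lane, with R in the least
+ * significant lane.
+ *
+ *	r = (px >>  6) & 0x3ff;
+ *	g = (px >> 22) & 0x3ff;
+ *	b = (px >> 38) & 0x3ff;
+ *	a = (px >> 54) & 0x3ff;
+ */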
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit; instead, it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
struct drm_virtgpu_3d_box {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * For 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
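+
+/*
+ * Illustrative user-space sketch (not part of this header; fd is a
+ * hypothetical open DRM file descriptor): creating a mappable guest blob.
+ * For VIRTGPU_BLOB_MEM_GUEST the cmd, cmd_size and blob_id fields are left
+ * zero; on success the kernel is expected to return the handles in
+ * bo_handle and res_handle.
+ *
+ *	struct drm_virtgpu_resource_create_blob args = {
+ *		.blob_mem = VIRTGPU_BLOB_MEM_GUEST,
+ *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
+ *		.size = 4096,
+ *	};
+ *	int ret = ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);
+ */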
+
#if defined(__cplusplus)
}
#endif
*/
#define VIRTIO_GPU_F_RESOURCE_UUID 2
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
struct virtio_gpu_ctrl_hdr {
__u8 uuid[16];
};
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ __le32 blob_mem;
+ __le32 blob_flags;
+ __le32 nr_entries;
+ __le64 blob_id;
+ __le64 size;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+ __le32 width;
+ __le32 height;
+ __le32 format;
+ __le32 padding;
+ __le32 strides[4];
+ __le32 offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+ __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u32 map_info;
+ __u32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
#endif