Merge drm/drm-next into drm-misc-next
Author:     Maxime Ripard <maxime@cerno.tech>
AuthorDate: Mon, 2 Nov 2020 10:17:54 +0000 (11:17 +0100)
Commit:     Maxime Ripard <maxime@cerno.tech>
CommitDate: Mon, 2 Nov 2020 10:17:54 +0000 (11:17 +0100)
Daniel needs -rc2 in drm-misc-next to merge some patches

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
306 files changed:
Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/dma-buf.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/todo.rst
Documentation/gpu/vkms.rst
MAINTAINERS
arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-resv.c
drivers/dma-buf/heaps/heap-helpers.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
drivers/gpu/drm/arm/display/komeda/komeda_dev.h
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/armada/armada_gem.h
drivers/gpu/drm/aspeed/Kconfig
drivers/gpu/drm/aspeed/aspeed_gfx.h
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
drivers/gpu/drm/bridge/analogix/Kconfig
drivers/gpu/drm/bridge/analogix/Makefile
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/analogix/anx7625.c [new file with mode: 0644]
drivers/gpu/drm/bridge/analogix/anx7625.h [new file with mode: 0644]
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
drivers/gpu/drm/bridge/tc358764.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_dp_aux_dev.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fourcc.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_simple_kms_helper.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gem.h
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/hisilicon/hibmc/Makefile
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c [new file with mode: 0644]
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/imx/dcss/dcss-crtc.c
drivers/gpu/drm/imx/dcss/dcss-kms.c
drivers/gpu/drm/imx/dcss/dcss-plane.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
drivers/gpu/drm/ingenic/ingenic-drm.h
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/meson/meson_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/mxsfb/mxsfb_kms.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_gem.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_mem.h
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem.h
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-novatek-nt36672a.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
drivers/gpu/drm/panel/panel-raydium-rm68200.c
drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-sitronix-st7703.c
drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/gpu/drm/panfrost/panfrost_device.c
drivers/gpu/drm/panfrost/panfrost_device.h
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_object.h
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tidss/tidss_crtc.c
drivers/gpu/drm/tidss/tidss_plane.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_drv.h
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c [deleted file]
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c [deleted file]
drivers/gpu/drm/ttm/ttm_pool.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_range_manager.c
drivers/gpu/drm/ttm/ttm_resource.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/tve200/tve200_drv.c
drivers/gpu/drm/vboxvideo/vbox_mode.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_txp.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/virtio/Makefile
drivers/gpu/drm/virtio/virtgpu_debugfs.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/virtio/virtgpu_prime.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/virtio/virtgpu_vram.c [new file with mode: 0644]
drivers/gpu/drm/vkms/Makefile
drivers/gpu/drm/vkms/vkms_composer.c
drivers/gpu/drm/vkms/vkms_crtc.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_drv.h
drivers/gpu/drm/vkms/vkms_gem.c [deleted file]
drivers/gpu/drm/vkms/vkms_plane.c
drivers/gpu/drm/vkms/vkms_writeback.c
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front.h
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/gpu/drm/xlnx/zynqmp_disp.c
drivers/gpu/drm/xlnx/zynqmp_dpsub.c
drivers/gpu/drm/zte/zx_vou.c
drivers/gpu/vga/vga_switcheroo.c
drivers/iommu/io-pgtable-arm.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/misc/fastrpc.c
drivers/video/console/sticore.c
drivers/video/fbdev/atafb.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/cirrusfb.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/matrox/matroxfb_base.c
drivers/video/fbdev/mx3fb.c
drivers/video/fbdev/nvidia/nv_of.c
drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
drivers/video/fbdev/omap2/omapfb/dss/venc.c
drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
drivers/video/fbdev/sbuslib.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/sis/300vtbl.h
drivers/video/fbdev/sis/sis_accel.h
include/drm/drm_atomic.h
include/drm/drm_dp_helper.h
include/drm/drm_drv.h
include/drm/drm_gem.h
include/drm/drm_gem_vram_helper.h
include/drm/drm_mode_config.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_prime.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_caching.h [new file with mode: 0644]
include/drm/ttm/ttm_page_alloc.h [deleted file]
include/drm/ttm/ttm_placement.h
include/drm/ttm/ttm_pool.h [new file with mode: 0644]
include/drm/ttm/ttm_resource.h
include/drm/ttm/ttm_set_memory.h [deleted file]
include/drm/ttm/ttm_tt.h
include/linux/dma-buf-map.h [new file with mode: 0644]
include/linux/dma-buf.h
include/linux/font.h
include/linux/platform_data/shmob_drm.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/virtio_gpu.h

diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
new file mode 100644 (file)
index 0000000..60585a4
--- /dev/null
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Analogix Semiconductor, Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Analogix ANX7625 SlimPort (4K Mobile HD Transmitter)
+
+maintainers:
+  - Xin Ji <xji@analogixsemi.com>
+
+description: |
+  The ANX7625 is an ultra-low power 4K Mobile HD Transmitter
+  designed for portable devices.
+
+properties:
+  compatible:
+    items:
+      - const: analogix,anx7625
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    description: used for interrupt pin B8.
+    maxItems: 1
+
+  enable-gpios:
+    description: used for chip power-on control, POWER_EN pin D2.
+    maxItems: 1
+
+  reset-gpios:
+    description: used for chip reset control, RESET_N pin B7.
+    maxItems: 1
+
+  ports:
+    type: object
+
+    properties:
+      port@0:
+        type: object
+        description:
+          Video port for MIPI DSI input.
+
+      port@1:
+        type: object
+        description:
+          Video port for panel or connector.
+
+    required:
+        - port@0
+        - port@1
+
+required:
+  - compatible
+  - reg
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        encoder@58 {
+            compatible = "analogix,anx7625";
+            reg = <0x58>;
+            enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+            reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                mipi2dp_bridge_in: port@0 {
+                    reg = <0>;
+                    anx7625_in: endpoint {
+                        remote-endpoint = <&mipi_dsi>;
+                    };
+                };
+
+                mipi2dp_bridge_out: port@1 {
+                    reg = <1>;
+                    anx7625_out: endpoint {
+                        remote-endpoint = <&panel_in>;
+                    };
+                };
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
new file mode 100644 (file)
index 0000000..d2170de
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt36672a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT36672A based DSI display Panels
+
+maintainers:
+  - Sumit Semwal <sumit.semwal@linaro.org>
+
+description: |
+  The nt36672a IC from Novatek is a generic DSI panel IC used to drive DSI
+  panels.
+  Right now, support is added only for a Tianma FHD+ LCD display panel with a
+  resolution of 1080x2246. It is a video-mode DSI panel.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+         - tianma,fhd-video
+      - const: novatek,nt36672a
+    description: This indicates the manufacturer of the panel that in turn
+      uses the NT36672A panel driver. This compatible string determines how
+      the NT36672A panel driver is configured for the indicated panel. The
+      novatek,nt36672a compatible shall always be provided as a fallback.
+
+  reset-gpios:
+    description: phandle of the GPIO used for the reset line. The line should
+      be driven at 8 mA and can be configured using mux, pinctrl and
+      pinctrl-names (active high).
+
+  vddio-supply:
+    description: phandle of the regulator that provides the supply voltage to
+      the panel's power IC.
+
+  vddpos-supply:
+    description: phandle of the positive boost supply regulator
+
+  vddneg-supply:
+    description: phandle of the negative boost supply regulator
+
+  reg: true
+  port: true
+
+required:
+  - compatible
+  - reg
+  - vddio-supply
+  - vddpos-supply
+  - vddneg-supply
+  - reset-gpios
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |+
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "tianma,fhd-video", "novatek,nt36672a";
+            reg = <0>;
+            vddio-supply = <&vreg_l14a_1p88>;
+            vddpos-supply = <&lab>;
+            vddneg-supply = <&ibb>;
+
+            reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port {
+                tianma_nt36672a_in_0: endpoint {
+                    remote-endpoint = <&dsi0_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index c0dd9fa29f1d215f6e9856ff2f234b65615975b1..a29ab65507f0a2fcb38226aa6942c37624f2dfdc 100644 (file)
@@ -47,6 +47,8 @@ properties:
       - panasonic,vvx10f004b00
         # Panasonic 10" WUXGA TFT LCD panel
       - panasonic,vvx10f034n00
+        # Shanghai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel
+      - tdo,tl070wsh30
 
   reg:
     maxItems: 1
@@ -54,6 +56,7 @@ properties:
 
   backlight: true
   enable-gpios: true
+  reset-gpios: true
   port: true
   power-supply: true
 
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index edb53ab0d9eb26aa9640169c1d862f52ffd9b0f5..f9750b0b6708a617933ae12af8d6e49e297e9307 100644 (file)
@@ -282,6 +282,8 @@ properties:
       - vxt,vl050-8048nt-c01
         # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
       - winstar,wf35ltiacd
+        # Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel
+      - yes-optoelectronics,ytc700tlag-05-201c
 
   backlight: true
   enable-gpios: true
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 4f9185462ed3d7bece62544b4501230412eabb38..4dc30738ee576cc579f14294e72723aa0cba9886 100644 (file)
@@ -55,6 +55,14 @@ properties:
       - const: vp1
       - const: vp2
 
+  assigned-clocks:
+    minItems: 1
+    maxItems: 3
+
+  assigned-clock-parents:
+    minItems: 1
+    maxItems: 3
+
   interrupts:
     maxItems: 1
 
@@ -62,6 +70,9 @@ properties:
     maxItems: 1
     description: phandle to the associated power domain
 
+  dma-coherent:
+    type: boolean
+
   ports:
     type: object
     description:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index 173730d56334d64c6978597d382b76ccbd64c0d1..c9a947d55fa4a5999c45c5430dbbb5ba237ad085 100644 (file)
@@ -77,6 +77,14 @@ properties:
       - const: vp3
       - const: vp4
 
+  assigned-clocks:
+    minItems: 1
+    maxItems: 5
+
+  assigned-clock-parents:
+    minItems: 1
+    maxItems: 5
+
   interrupts:
     items:
       - description: common_m DSS Master common
@@ -95,6 +103,9 @@ properties:
     maxItems: 1
     description: phandle to the associated power domain
 
+  dma-coherent:
+    type: boolean
+
   ports:
     type: object
     description:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 2735be1a84709587e36ef3f0cb855d14ad6c0391..12e1419823d4d2eac2962926d21f714a88ea736a 100644 (file)
@@ -1053,6 +1053,8 @@ patternProperties:
     description: Trusted Computing Group
   "^tcl,.*":
     description: Toby Churchill Ltd.
+  "^tdo,.*":
+    description: Shanghai Top Display Optoelectronics Co., Ltd
   "^technexion,.*":
     description: TechNexion
   "^technologic,.*":
@@ -1210,6 +1212,8 @@ patternProperties:
     description: Shenzhen Xunlong Software CO.,Limited
   "^xylon,.*":
     description: Xylon
+  "^yes-optoelectronics,.*":
+    description: Yes Optoelectronics Co.,Ltd.
   "^ylm,.*":
     description: Shenzhen Yangliming Electronic Technology Co., Ltd.
   "^yna,.*":
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 4144b669e80c14b3c15bcbfae367da205fc7ba85..d6b2a195dbed87ec4a67efb92435b947faaddc02 100644 (file)
@@ -115,6 +115,15 @@ Kernel Functions and Structures Reference
 .. kernel-doc:: include/linux/dma-buf.h
    :internal:
 
+Buffer Mapping Helpers
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+   :doc: overview
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+   :internal:
+
 Reservation Objects
 -------------------
 
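The struct dma_buf_map helpers documented above describe a mapping that may
live either in system memory or in I/O memory, so callers no longer
dereference a raw pointer blindly. A minimal sketch (not part of this diff),
using only the accessors introduced by include/linux/dma-buf-map.h in this
merge; the buffer pointer and length are illustrative:

    #include <linux/dma-buf-map.h>
    #include <linux/string.h>

    static void example(void *buffer, size_t len)
    {
            struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buffer);

            /* The mapping is set and points at system memory. */
            if (dma_buf_map_is_set(&map) && !map.is_iomem)
                    memset(map.vaddr, 0, len);  /* plain kernel pointer */

            dma_buf_map_clear(&map);            /* mark the mapping invalid */
            WARN_ON(!dma_buf_map_is_null(&map));
    }
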
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 9abee1589c1e3d1af492ed527a53b84a26284726..21be6deadc120b1ef4a22cf76eedbfe357b52a06 100644 (file)
@@ -182,11 +182,11 @@ acquired and release by calling drm_gem_object_get() and drm_gem_object_put()
 respectively.
 
 When the last reference to a GEM object is released the GEM core calls
-the :c:type:`struct drm_driver <drm_driver>` gem_free_object_unlocked
+the :c:type:`struct drm_gem_object_funcs <gem_object_funcs>` free
 operation. That operation is mandatory for GEM-enabled drivers and must
 free the GEM object and all associated resources.
 
-void (\*gem_free_object) (struct drm_gem_object \*obj); Drivers are
+void (\*free) (struct drm_gem_object \*obj); Drivers are
 responsible for freeing all GEM object resources. This includes the
 resources created by the GEM core, which need to be released with
 drm_gem_object_release().
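
To make the text above concrete, a driver now wires the free operation
through struct drm_gem_object_funcs instead of struct drm_driver. The sketch
below is hypothetical (the example_gem type and funcs table are made up);
only drm_gem_object_release() and the .free hook come from the documented
interface:

    #include <drm/drm_gem.h>
    #include <linux/slab.h>

    struct example_gem {
            struct drm_gem_object base;
            /* driver-private state would live here */
    };

    static void example_gem_free(struct drm_gem_object *obj)
    {
            struct example_gem *gem = container_of(obj, struct example_gem, base);

            drm_gem_object_release(obj);    /* resources created by the GEM core */
            kfree(gem);                     /* then the object itself */
    }

    static const struct drm_gem_object_funcs example_gem_funcs = {
            .free = example_gem_free,       /* called on last reference drop */
    };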
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b0ea17da8ff6386e6ab4f3beb16a4ea65a81388c..6b224ef14455e7f996297758e8ee7eaecb798783 100644 (file)
@@ -105,6 +105,10 @@ converted over to the new infrastructure.
 One issue with the helpers is that they require that drivers handle completion
 events for atomic commits correctly. But fixing these bugs is good anyway.
 
+Somewhat related is the legacy_cursor_update hack, which should be replaced with
+the new atomic_async_check/commit functionality in the helpers in drivers that
+still look at that flag.
+
 Contact: Daniel Vetter, respective driver maintainers
 
 Level: Advanced
@@ -149,7 +153,7 @@ have to keep track of that lock and either call ``unreference`` or
 ``unreference_locked`` depending upon context.
 
 Core GEM doesn't have a need for ``struct_mutex`` any more since kernel 4.8,
-and there's a ``gem_free_object_unlocked`` callback for any drivers which are
+and there's a GEM object ``free`` callback for any drivers which are
 entirely ``struct_mutex`` free.
 
 For drivers that need ``struct_mutex`` it should be replaced with a driver-
@@ -289,11 +293,8 @@ struct drm_gem_object_funcs
 ---------------------------
 
 GEM objects can now have a function table instead of having the callbacks on the
-DRM driver struct. This is now the preferred way and drivers can be moved over.
-
-We also need a 2nd version of the CMA define that doesn't require the
-vmapping to be present (different hook for prime importing). Plus this needs to
-be rolled out to all drivers using their own implementations, too.
+DRM driver struct. This is now the preferred way. Callbacks in drivers have been
+converted, except for struct drm_driver.gem_prime_mmap.
 
 Level: Intermediate
 
@@ -518,9 +519,6 @@ There's a bunch of issues with it:
   this (together with the drm_minor->drm_device move) would allow us to remove
   debugfs_init.
 
-- Drop the return code and error checking from all debugfs functions. Greg KH is
-  working on this already.
-
 Contact: Daniel Vetter
 
 Level: Intermediate
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 61586fc861bbbb6e92a651956aff7eaa18456feb..13bab1d93bb3bb20a9ccb9ae7535071ebd82dc0e 100644 (file)
 TODO
 ====
 
-CRC API Improvements
---------------------
-
-- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
-
-- Use the alpha value to blend vaddr_src with vaddr_dst instead of
-  overwriting it in ``blend()``.
-
-- Add igt test to check cleared alpha value for XRGB plane format.
-
-- Add igt test to check extreme alpha values i.e. fully opaque and fully
-  transparent (intermediate values are affected by hw-specific rounding modes).
-
-Runtime Configuration
----------------------
-
-We want to be able to reconfigure vkms instance without having to reload the
-module. Use/Test-cases:
-
-- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
-  compositors).
+If you want to work on any of the items listed below, please share your
+interest with the VKMS maintainers.
 
-- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
-  them first).
+Better IGT support
+------------------
 
-- Change output configuration: Plug/unplug screens, change EDID, allow changing
-  the refresh rate.
+- Investigate: (1) kms_plane test cases that fail due to a timeout when
+  capturing the CRC; (2) kms_flip test cases that pass individually but
+  fail randomly when run in sequence.
 
-The currently proposed solution is to expose vkms configuration through
-configfs.  All existing module options should be supported through configfs too.
+- VKMS already supports vblanks simulated via hrtimers, which can be tested
+  with the kms_flip test; in that sense, VKMS already mimics real hardware
+  vblanks. However, there is also virtual hardware that does not support
+  vblank interrupts and completes page-flip events right away; on such
+  hardware, compositors may end up in a busy loop. It would be useful for
+  VKMS to support this virtual-hardware behavior too, so that compositor
+  developers can test their features in both scenarios.
 
 Add Plane Features
 ------------------
@@ -55,34 +43,50 @@ There's lots of plane features we could add support for:
 - Additional buffer formats, especially YUV formats for video like NV12.
   Low/high bpp RGB formats would also be interesting.
 
-- Async updates (currently only possible on cursor plane using the legacy cursor
-  api).
+- Async updates (currently only possible on cursor plane using the legacy
+  cursor api).
+
+For all of these, we also want to review the igt test coverage and make sure
+all relevant igt testcases work on vkms.
+
+Prime Buffer Sharing
+--------------------
 
-For all of these, we also want to review the igt test coverage and make sure all
-relevant igt testcases work on vkms.
+- Syzbot report - WARNING in vkms_gem_free_object:
+  https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure vkms instance without having to reload the
+module. Use/Test-cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling
+  of compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+  them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+  the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs.  All existing module options should be supported through configfs
+too.
 
 Writeback support
 -----------------
 
-Currently vkms only computes a CRC for each frame. Once we have additional plane
-features, we could write back the entire composited frame, and expose it as:
+- The writeback and CRC capture operations both use the composer_enabled
+  boolean to ensure vblanks. For these operations to work together,
+  composer_enabled probably needs to refcount the composer state to work
+  properly.
 
-- Writeback connector. This is useful for testing compositors if you don't have
-  hardware with writeback support.
+- Add support for cloned writeback outputs and related test cases using a
+  cloned output in the IGT kms_writeback.
 
 - As a v4l device. This is useful for debugging compositors on special vkms
   configurations, so that developers see what's really going on.
 
-Prime Buffer Sharing
---------------------
-
-We already have vgem, which is a gem driver for testing rendering, similar to
-how vkms is for testing the modeset side. Adding buffer sharing support to vkms
-allows us to test them together, to test synchronization and lots of other
-features. Also, this allows compositors to test whether they work correctly on
-SoC chips, where the display and rendering is very often split between 2
-drivers.
-
 Output Features
 ---------------
 
@@ -93,7 +97,10 @@ Output Features
 - Add support for link status, so that compositors can validate their runtime
   fallbacks when e.g. a Display Port link goes bad.
 
-- All the hotplug handling describe under "Runtime Configuration".
+CRC API Improvements
+--------------------
+
+- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
 
 Atomic Check using eBPF
 -----------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index b516bb34a8d5a95f2be58fa2832c789a56fca450..71e29dc0ab9d9860b77104bd80919d4befb0e00f 100644 (file)
@@ -5576,6 +5576,13 @@ T:       git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
 F:     drivers/gpu/drm/panel/panel-novatek-nt35510.c
 
+DRM DRIVER FOR NOVATEK NT36672A PANELS
+M:     Sumit Semwal <sumit.semwal@linaro.org>
+S:     Maintained
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+F:     Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+F:     drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+
 DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
 M:     Ben Skeggs <bskeggs@redhat.com>
 L:     dri-devel@lists.freedesktop.org
@@ -5955,6 +5962,7 @@ F:        include/uapi/drm/v3d_drm.h
 
 DRM DRIVERS FOR VC4
 M:     Eric Anholt <eric@anholt.net>
+M:     Maxime Ripard <mripard@kernel.org>
 S:     Supported
 T:     git git://github.com/anholt/linux
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6911,10 +6919,9 @@ F:       drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     dri-devel@lists.freedesktop.org
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/fb/
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
index 9b8548e5f6e51194bff5929281f15491df42b69a..ee8fcae9f9f00aa833f3e67288859bbe6b6ddcfb 100644 (file)
                };
        };
 };
+
+&mali {
+       dma-coherent;
+};
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 844967f98866a5749bf54367f98131c7df9d5370..556f62e8b1960d49354f6b0d323c4298bf78cbeb 100644 (file)
@@ -851,6 +851,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
  * on error. May return -EINTR if it is interrupted by a signal.
  *
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
  * the underlying backing storage is pinned for as long as a mapping exists,
  * therefore users/importers should not hold onto a mapping for undue amounts of
@@ -904,6 +907,24 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                attach->dir = direction;
        }
 
+#ifdef CONFIG_DMA_API_DEBUG
+       {
+               struct scatterlist *sg;
+               u64 addr;
+               int len;
+               int i;
+
+               for_each_sgtable_dma_sg(sg_table, sg, i) {
+                       addr = sg_dma_address(sg);
+                       len = sg_dma_len(sg);
+                       if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+                               pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+                                        __func__, addr, len);
+                       }
+               }
+       }
+#endif /* CONFIG_DMA_API_DEBUG */
+
        return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -1188,68 +1209,72 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
  * address space. Same restrictions as for vmap and friends apply.
  * @dmabuf:    [in]    buffer to vmap
+ * @map:       [out]   returns the vmap pointer
  *
  * This call may fail due to lack of virtual mapping address space.
  * These calls are optional in drivers. The intended use for them
  * is for mapping objects linear in kernel space for high use objects.
  * Please attempt to use kmap/kunmap before thinking about these interfaces.
  *
- * Returns NULL on error.
+ * Returns 0 on success, or a negative errno code otherwise.
  */
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
-       void *ptr;
+       struct dma_buf_map ptr;
+       int ret = 0;
+
+       dma_buf_map_clear(map);
 
        if (WARN_ON(!dmabuf))
-               return NULL;
+               return -EINVAL;
 
        if (!dmabuf->ops->vmap)
-               return NULL;
+               return -EINVAL;
 
        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
-               BUG_ON(!dmabuf->vmap_ptr);
-               ptr = dmabuf->vmap_ptr;
+               BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
+               *map = dmabuf->vmap_ptr;
                goto out_unlock;
        }
 
-       BUG_ON(dmabuf->vmap_ptr);
+       BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
 
-       ptr = dmabuf->ops->vmap(dmabuf);
-       if (WARN_ON_ONCE(IS_ERR(ptr)))
-               ptr = NULL;
-       if (!ptr)
+       ret = dmabuf->ops->vmap(dmabuf, &ptr);
+       if (WARN_ON_ONCE(ret))
                goto out_unlock;
 
        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;
 
+       *map = dmabuf->vmap_ptr;
+
 out_unlock:
        mutex_unlock(&dmabuf->lock);
-       return ptr;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_vmap);
 
 /**
  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
  * @dmabuf:    [in]    buffer to vunmap
- * @vaddr:     [in]    vmap to vunmap
+ * @map:       [in]    vmap pointer to vunmap
  */
-void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        if (WARN_ON(!dmabuf))
                return;
 
-       BUG_ON(!dmabuf->vmap_ptr);
+       BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
        BUG_ON(dmabuf->vmapping_counter == 0);
-       BUG_ON(dmabuf->vmap_ptr != vaddr);
+       BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
 
        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
-                       dmabuf->ops->vunmap(dmabuf, vaddr);
-               dmabuf->vmap_ptr = NULL;
+                       dmabuf->ops->vunmap(dmabuf, map);
+               dma_buf_map_clear(&dmabuf->vmap_ptr);
        }
        mutex_unlock(&dmabuf->lock);
 }
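
For importers, the conversion above changes the calling convention:
dma_buf_vmap() now fills in a caller-provided struct dma_buf_map and returns
an errno instead of returning a pointer. A hedged sketch of the new usage
(the surrounding driver code is illustrative):

    struct dma_buf_map map;
    int ret;

    ret = dma_buf_vmap(dmabuf, &map);
    if (ret)
            return ret;             /* no mapping was established */

    /* Use map.vaddr for system memory; check map.is_iomem before
     * dereferencing, since the backing storage may be I/O memory. */

    dma_buf_vunmap(dmabuf, &map);   /* pass the same map back to unmap */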
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 1c8f2581cb09a438c49a87eb2ca7b5f6f6160b08..bb5a42b10c2900629398e3ac4a33eff657feb472 100644 (file)
@@ -63,7 +63,7 @@ static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
 {
        struct dma_resv_list *list;
 
-       list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+       list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
        if (!list)
                return NULL;
 
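The dma-resv change above replaces an open-coded offsetof() size computation
with struct_size(), which computes the size of a struct that ends in a
flexible array and saturates to SIZE_MAX on overflow, so kmalloc() fails
cleanly rather than allocating a short buffer. A minimal sketch with a
made-up struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct item_list {
            unsigned int count;
            void *items[];          /* flexible array member */
    };

    static struct item_list *item_list_alloc(unsigned int max)
    {
            struct item_list *list;

            /* struct_size(list, items, max) ==
             * sizeof(*list) + max * sizeof(list->items[0]), overflow-checked
             */
            list = kmalloc(struct_size(list, items, max), GFP_KERNEL);
            if (list)
                    list->count = 0;
            return list;
    }
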
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
index d0696cf937af3476578bac4a10464e8948720545..fcf4ce3e2cbb5eba326b9ce669b697e03b8ae004 100644 (file)
@@ -235,7 +235,7 @@ static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
        return 0;
 }
 
-static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        struct heap_helper_buffer *buffer = dmabuf->priv;
        void *vaddr;
@@ -244,10 +244,14 @@ static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
        vaddr = dma_heap_buffer_vmap_get(buffer);
        mutex_unlock(&buffer->lock);
 
-       return vaddr;
+       if (!vaddr)
+               return -ENOMEM;
+       dma_buf_map_set_vaddr(map, vaddr);
+
+       return 0;
 }
 
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        struct heap_helper_buffer *buffer = dmabuf->priv;
 
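On the exporter side, the ->vmap callback mirrors the same convention: it
fills the caller-provided struct dma_buf_map via dma_buf_map_set_vaddr() and
returns an errno, as the heap-helpers conversion above shows. A hypothetical
exporter (its example_buffer_vmap_get() helper is made up) follows the same
shape:

    static int example_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
    {
            void *vaddr = example_buffer_vmap_get(dmabuf->priv); /* hypothetical */

            if (!vaddr)
                    return -ENOMEM; /* leave *map untouched on failure */

            dma_buf_map_set_vaddr(map, vaddr);
            return 0;
    }
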
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 147d61b9674ea119bd66327095dd04cf6b802a2d..64376dd298edfe793e18a5cb478211d5c73232ce 100644 (file)
@@ -182,13 +182,6 @@ config DRM_TTM
          GPU memory types. Will be enabled automatically if a device driver
          uses it.
 
-config DRM_TTM_DMA_PAGE_POOL
-       bool
-       depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
-       default y
-       help
-         Choose this if you need the TTM dma page pool
-
 config DRM_VRAM_HELPER
        tristate
        depends on DRM
@@ -287,6 +280,7 @@ config DRM_VKMS
        tristate "Virtual KMS (EXPERIMENTAL)"
        depends on DRM
        select DRM_KMS_HELPER
+       select DRM_GEM_SHMEM_HELPER
        select CRC32
        default n
        help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5da487b64a668a4590d85bd2123cd523c0ced8bd..054a1c2d5054203de2406d1d98a3b03db6cea4aa 100644 (file)
@@ -1479,7 +1479,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                }
        }
 
-       if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+       if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
                amdgpu_bo_fence(bo,
                                &avm->process_info->eviction_fence->base,
                                true);
@@ -1558,7 +1558,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
         * required.
         */
        if (mem->mapped_to_gpu_memory == 0 &&
-           !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+           !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
+           !mem->bo->tbo.pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                                process_info->eviction_fence);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 12598a4b5c788f5473d08aad318e1aa1a0b35508..d50b63a93d37f7c403cb571795f102cfb94d4389 100644 (file)
@@ -410,7 +410,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
        uint32_t domain;
        int r;
 
-       if (bo->pin_count)
+       if (bo->tbo.pin_count)
                return 0;
 
        /* Don't move this buffer if we have depleted our allowance
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 2d125b8b15ee1962f2569087c0b8d006512fca52..065937482239f5c29768bb4e242dfaba0e7a1a72 100644 (file)
@@ -1319,6 +1319,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
+       struct ttm_resource_manager *man;
        int r;
 
        r = pm_runtime_get_sync(dev->dev);
@@ -1327,7 +1328,9 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
                return r;
        }
 
-       seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+       man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+       r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+       seq_printf(m, "(%d)\n", r);
 
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7cc7af2a6822e4795d1b096cf723011909dce9aa..b25faaee6f0e9c0c241672e954371cf0cdac8cff 100644 (file)
@@ -132,10 +132,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work)
        /* unpin of the old buffer */
        r = amdgpu_bo_reserve(work->old_abo, true);
        if (likely(r == 0)) {
-               r = amdgpu_bo_unpin(work->old_abo);
-               if (unlikely(r != 0)) {
-                       DRM_ERROR("failed to unpin buffer after flip\n");
-               }
+               amdgpu_bo_unpin(work->old_abo);
                amdgpu_bo_unreserve(work->old_abo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
@@ -249,8 +246,7 @@ pflip_cleanup:
        }
 unpin:
        if (!adev->enable_virtual_display)
-               if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
-                       DRM_ERROR("failed to unpin new abo in error path\n");
+               amdgpu_bo_unpin(new_abo);
 
 unreserve:
        amdgpu_bo_unreserve(new_abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 957934926b24529341b3b0a4c13468d7e012afee..5b465ab774d14189dfba28faa5f962e3c2f15406 100644 (file)
@@ -281,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
        struct sg_table *sgt;
        long r;
 
-       if (!bo->pin_count) {
+       if (!bo->tbo.pin_count) {
                /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
                unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -390,7 +390,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
        if (unlikely(ret != 0))
                return ret;
 
-       if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+       if (!bo->tbo.pin_count &&
+           (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42d9748921f5e3b5732d0915b3e9bcd54c6c3a34..8b30915aa9720b52ef899cdee8527756a94baa21 100644 (file)
@@ -1520,19 +1520,13 @@ static struct drm_driver kms_driver = {
        .lastclose = amdgpu_driver_lastclose_kms,
        .irq_handler = amdgpu_irq_handler,
        .ioctls = amdgpu_ioctls_kms,
-       .gem_free_object_unlocked = amdgpu_gem_object_free,
-       .gem_open_object = amdgpu_gem_object_open,
-       .gem_close_object = amdgpu_gem_object_close,
        .dumb_create = amdgpu_mode_dumb_create,
        .dumb_map_offset = amdgpu_mode_dumb_mmap,
        .fops = &amdgpu_driver_kms_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = amdgpu_gem_prime_export,
        .gem_prime_import = amdgpu_gem_prime_import,
-       .gem_prime_vmap = amdgpu_gem_prime_vmap,
-       .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
        .gem_prime_mmap = amdgpu_gem_prime_mmap,
 
        .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7e8265da9f2508e1eb9e584e9848462a45cd63fc..8ea6fc7457690fb40520e8837905b93b87d9fa7b 100644 (file)
 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
 #include "amdgpu_xgmi.h"
 
-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
@@ -87,6 +90,7 @@ retry:
                return r;
        }
        *obj = &bo->tbo.base;
+       (*obj)->funcs = &amdgpu_gem_object_funcs;
 
        return 0;
 }
@@ -119,8 +123,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
-                          struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+                                 struct drm_file *file_priv)
 {
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -152,8 +156,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
        return 0;
 }
 
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
-                            struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+                                   struct drm_file *file_priv)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -211,6 +215,15 @@ out_unlock:
        ttm_eu_backoff_reservation(&ticket, &list);
 }
 
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+       .free = amdgpu_gem_object_free,
+       .open = amdgpu_gem_object_open,
+       .close = amdgpu_gem_object_close,
+       .export = amdgpu_gem_prime_export,
+       .vmap = amdgpu_gem_prime_vmap,
+       .vunmap = amdgpu_gem_prime_vunmap,
+};
+
 /*
  * GEM ioctls.
  */
@@ -870,7 +883,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);
 
-       pin_count = READ_ONCE(bo->pin_count);
+       pin_count = READ_ONCE(bo->tbo.pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index e0f025dd1b14aa4b0002f5c16f19811115fccd98..637bf51dbf06f0bf2f0701e4a54d4711f8598c77 100644 (file)
 #define AMDGPU_GEM_DOMAIN_MAX          0x3
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
 
-void amdgpu_gem_object_free(struct drm_gem_object *obj);
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
-                               struct drm_file *file_priv);
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
-                               struct drm_file *file_priv);
 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 36604d751d622e8dc2f6498969385068ca75dbad..cc86f431a3d4023c0106bfb6b818e5606fe1cd5a 100644 (file)
@@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
                               uint64_t *addr, uint64_t *flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct ttm_dma_tt *ttm;
 
        switch (bo->tbo.mem.mem_type) {
        case TTM_PL_TT:
-               ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-               *addr = ttm->dma_address[0];
+               *addr = bo->tbo.ttm->dma_address[0];
                break;
        case TTM_PL_VRAM:
                *addr = amdgpu_bo_gpu_offset(bo);
@@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-       struct ttm_dma_tt *ttm;
 
-       if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+       if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
                return AMDGPU_BO_INVALID_OFFSET;
 
-       ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-       if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+       if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
                return AMDGPU_BO_INVALID_OFFSET;
 
-       return adev->gmc.agp_start + ttm->dma_address[0];
+       return adev->gmc.agp_start + bo->ttm->dma_address[0];
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f203e4a6a3f2b062dce032044229d109e817def3..1721739def84a3a265a5e53b1d260f825142283d 100644 (file)
@@ -136,7 +136,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
 
        ttm_resource_manager_set_used(man, false);
 
-       ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+       ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
        if (ret)
                return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ac043baac05d6d234ee726794176cf624ccccb5e..1aa516429c80a7d7598507a6966dd459c8e70078 100644 (file)
@@ -78,7 +78,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
-       if (bo->pin_count > 0)
+       if (bo->tbo.pin_count > 0)
                amdgpu_bo_subtract_pin_size(bo);
 
        amdgpu_bo_kunmap(bo);
@@ -137,7 +137,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_VRAM;
-               places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+               places[c].flags = 0;
 
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
@@ -154,11 +154,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_TT;
                places[c].flags = 0;
-               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
-                       places[c].flags |= TTM_PL_FLAG_WC |
-                               TTM_PL_FLAG_UNCACHED;
-               else
-                       places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }
 
@@ -167,11 +162,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
                places[c].flags = 0;
-               if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
-                       places[c].flags |= TTM_PL_FLAG_WC |
-                               TTM_PL_FLAG_UNCACHED;
-               else
-                       places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }
 
@@ -179,7 +169,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GDS;
-               places[c].flags = TTM_PL_FLAG_UNCACHED;
+               places[c].flags = 0;
                c++;
        }
 
@@ -187,7 +177,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GWS;
-               places[c].flags = TTM_PL_FLAG_UNCACHED;
+               places[c].flags = 0;
                c++;
        }
 
@@ -195,7 +185,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_OA;
-               places[c].flags = TTM_PL_FLAG_UNCACHED;
+               places[c].flags = 0;
                c++;
        }
 
@@ -203,7 +193,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
-               places[c].flags = TTM_PL_MASK_CACHING;
+               places[c].flags = 0;
                c++;
        }
 
@@ -721,7 +711,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
        uint32_t domain;
        int r;
 
-       if (bo->pin_count)
+       if (bo->tbo.pin_count)
                return 0;
 
        domain = bo->preferred_domains;
@@ -918,13 +908,13 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         */
        domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
-       if (bo->pin_count) {
+       if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;
 
                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;
 
-               bo->pin_count++;
+               ttm_bo_pin(&bo->tbo);
 
                if (max_offset != 0) {
                        u64 domain_start = amdgpu_ttm_domain_start(adev,
@@ -955,7 +945,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
-               bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -964,7 +953,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                goto error;
        }
 
-       bo->pin_count = 1;
+       ttm_bo_pin(&bo->tbo);
 
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -1006,34 +995,16 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct ttm_operation_ctx ctx = { false, false };
-       int r, i;
-
-       if (WARN_ON_ONCE(!bo->pin_count)) {
-               dev_warn(adev->dev, "%p unpin not necessary\n", bo);
-               return 0;
-       }
-       bo->pin_count--;
-       if (bo->pin_count)
-               return 0;
+       ttm_bo_unpin(&bo->tbo);
+       if (bo->tbo.pin_count)
+               return;
 
        amdgpu_bo_subtract_pin_size(bo);
 
        if (bo->tbo.base.import_attach)
                dma_buf_unpin(bo->tbo.base.import_attach);
-
-       for (i = 0; i < bo->placement.num_placement; i++) {
-               bo->placements[i].lpfn = 0;
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r))
-               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-
-       return r;
 }
 
 /**
@@ -1048,6 +1019,8 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
  */
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
+       struct ttm_resource_manager *man;
+
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 #ifndef CONFIG_HIBERNATION
        if (adev->flags & AMD_IS_APU) {
@@ -1055,7 +1028,9 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
                return 0;
        }
 #endif
-       return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+
+       man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+       return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
 }
 
 static const char *amdgpu_vram_names[] = {
@@ -1360,19 +1335,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
-       struct amdgpu_bo *abo;
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        unsigned long offset, size;
        int r;
 
-       if (!amdgpu_bo_is_amdgpu_bo(bo))
-               return 0;
-
-       abo = ttm_to_amdgpu_bo(bo);
-
        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
@@ -1385,8 +1355,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
-       if (abo->pin_count > 0)
-               return -EINVAL;
+       if (abo->tbo.pin_count > 0)
+               return VM_FAULT_SIGBUS;
 
        /* hurrah the memory is not visible ! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
@@ -1398,15 +1368,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        abo->placement.busy_placement = &abo->placements[1];
 
        r = ttm_bo_validate(bo, &abo->placement, &ctx);
-       if (unlikely(r != 0))
-               return r;
+       if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+               return VM_FAULT_NOPAGE;
+       else if (unlikely(r))
+               return VM_FAULT_SIGBUS;
 
        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            (offset + size) > adev->gmc.visible_vram_size)
-               return -EINVAL;
+               return VM_FAULT_SIGBUS;
 
+       ttm_bo_move_to_lru_tail_unlocked(bo);
        return 0;
 }
 
@@ -1489,7 +1462,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
-                    !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
+                    !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
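
The amdgpu_bo_fault_reserve_notify() change above follows the kernel's
vm_fault_t convention: fault paths return VM_FAULT_* codes rather than
negative errnos, mapping -EBUSY/-ERESTARTSYS to VM_FAULT_NOPAGE (retry the
fault) and hard failures to VM_FAULT_SIGBUS. A minimal sketch of that
mapping (the helper name is made up):

    #include <linux/mm_types.h>     /* vm_fault_t and VM_FAULT_* codes */

    static vm_fault_t example_errno_to_fault(int err)
    {
            if (err == -EBUSY || err == -ERESTARTSYS)
                    return VM_FAULT_NOPAGE; /* drop locks and retry later */
            if (err)
                    return VM_FAULT_SIGBUS; /* unrecoverable for this VMA */
            return 0;                       /* handled; continue the fault */
    }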
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5ddb6cf9603019b57cbfdb44fe18c73e2265e270..132e5f955180d764f0b142da7b0d4b0b360ce596 100644 (file)
@@ -89,7 +89,6 @@ struct amdgpu_bo {
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
        u64                             flags;
-       unsigned                        pin_count;
        u64                             tiling_flags;
        u64                             metadata_flags;
        void                            *metadata;
@@ -267,7 +266,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo);
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 int amdgpu_bo_late_init(struct amdgpu_device *adev);
@@ -285,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
index 8039d239958466989b6626187584867f14c9f58a..bd6e6641c3fc1f7ae70e58be73f76207ac83f374 100644 (file)
@@ -47,7 +47,6 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
@@ -66,6 +65,8 @@
 static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+                                     struct ttm_tt *ttm);
 
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
                                    unsigned int type,
@@ -92,7 +93,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
 
        /* Don't handle scatter gather BOs */
@@ -292,11 +293,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        cpu_addr = &job->ibs[0].ptr[num_dw];
 
        if (mem->mem_type == TTM_PL_TT) {
-               struct ttm_dma_tt *dma;
                dma_addr_t *dma_address;
 
-               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
                r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
                                    cpu_addr);
                if (r)
@@ -538,19 +537,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
+       placements.flags = 0;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit from VRAM\n");
                return r;
        }
 
-       /* set caching flags */
-       r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
-       if (unlikely(r)) {
-               goto out_cleanup;
-       }
-
        r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
        if (unlikely(r))
                goto out_cleanup;
@@ -567,8 +560,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
                goto out_cleanup;
        }
 
-       /* move BO (in tmp_mem) to new_mem */
-       r = ttm_bo_move_ttm(bo, ctx, new_mem);
+       r = ttm_bo_wait_ctx(bo, ctx);
+       if (unlikely(r))
+               goto out_cleanup;
+
+       amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+       ttm_resource_free(bo, &bo->mem);
+       ttm_bo_assign_mem(bo, new_mem);
 out_cleanup:
        ttm_resource_free(bo, &tmp_mem);
        return r;
@@ -599,7 +597,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
+       placements.flags = 0;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -607,11 +605,16 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
        }
 
        /* move/bind old memory to GTT space */
-       r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+       r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+       if (unlikely(r))
+               goto out_cleanup;
+
+       r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
 
+       ttm_bo_assign_mem(bo, &tmp_mem);
        /* copy to VRAM */
        r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
        if (unlikely(r)) {
@@ -660,9 +663,17 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
        struct ttm_resource *old_mem = &bo->mem;
        int r;
 
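+       /* TTM dropped the ttm_tt bind/unbind hooks; bind GTT targets up front */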
+       if (new_mem->mem_type == TTM_PL_TT) {
+               r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+               if (r)
+                       return r;
+       }
+
+       amdgpu_bo_move_notify(bo, evict, new_mem);
+
        /* Can't move a pinned BO */
        abo = ttm_to_amdgpu_bo(bo);
-       if (WARN_ON_ONCE(abo->pin_count > 0))
+       if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
                return -EINVAL;
 
        adev = amdgpu_ttm_adev(bo->bdev);
@@ -671,14 +682,24 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
-       if ((old_mem->mem_type == TTM_PL_TT &&
-            new_mem->mem_type == TTM_PL_SYSTEM) ||
-           (old_mem->mem_type == TTM_PL_SYSTEM &&
-            new_mem->mem_type == TTM_PL_TT)) {
-               /* bind is enough */
+       if (old_mem->mem_type == TTM_PL_SYSTEM &&
+           new_mem->mem_type == TTM_PL_TT) {
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
+
+       if (old_mem->mem_type == TTM_PL_TT &&
+           new_mem->mem_type == TTM_PL_SYSTEM) {
+               r = ttm_bo_wait_ctx(bo, ctx);
+               if (r)
+                       goto fail;
+
+               amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_mem);
+               return 0;
+       }
+
        if (old_mem->mem_type == AMDGPU_PL_GDS ||
            old_mem->mem_type == AMDGPU_PL_GWS ||
            old_mem->mem_type == AMDGPU_PL_OA ||
@@ -712,12 +733,12 @@ memcpy:
                if (!amdgpu_mem_visible(adev, old_mem) ||
                    !amdgpu_mem_visible(adev, new_mem)) {
                        pr_err("Move buffer fallback to memcpy unavailable\n");
-                       return r;
+                       goto fail;
                }
 
                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (r)
-                       return r;
+                       goto fail;
        }
 
        if (bo->type == ttm_bo_type_device &&
@@ -732,6 +753,11 @@ memcpy:
        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
        return 0;
+fail:
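+       /*
+        * Swap so that move_notify() is told about the placement the BO
+        * still actually occupies (reverting the earlier notify), then
+        * swap back.
+        */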
+       swap(*new_mem, bo->mem);
+       amdgpu_bo_move_notify(bo, false, new_mem);
+       swap(*new_mem, bo->mem);
+       return r;
 }
 
 /**
@@ -767,6 +793,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 
                mem->bus.offset += adev->gmc.aper_base;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
@@ -811,7 +838,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
  * TTM backend functions.
  */
 struct amdgpu_ttm_tt {
-       struct ttm_dma_tt       ttm;
+       struct ttm_tt   ttm;
        struct drm_gem_object   *gobj;
        u64                     offset;
        uint64_t                userptr;
@@ -943,7 +970,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
        if (!gtt || !gtt->userptr)
                return false;
 
-       DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+       DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
                gtt->userptr, ttm->num_pages);
 
        WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@@ -1095,7 +1122,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 
 gart_bind_fail:
        if (r)
-               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
 
        return r;
@@ -1130,7 +1157,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
                }
        }
        if (!ttm->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+               WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
 
@@ -1153,7 +1180,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
                ttm->pages, gtt->ttm.dma_address, flags);
 
        if (r)
-               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
        gtt->bound = true;
        return r;
@@ -1267,8 +1294,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        if (r)
-               DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
-                         gtt->ttm.ttm.num_pages, gtt->offset);
+               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+                         gtt->ttm.num_pages, gtt->offset);
        gtt->bound = false;
 }
 
@@ -1282,7 +1309,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
 
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
 
@@ -1296,7 +1323,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
 {
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_ttm_tt *gtt;
+       enum ttm_caching caching;
 
        gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
@@ -1304,12 +1333,17 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
        }
        gtt->gobj = &bo->base;
 
+       if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+               caching = ttm_write_combined;
+       else
+               caching = ttm_cached;
+
        /* allocate space for the uninitialized page entries */
-       if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+       if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
 }
 
 /**
@@ -1332,7 +1366,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
                        return -ENOMEM;
 
                ttm->page_flags |= TTM_PAGE_FLAG_SG;
-               ttm_tt_set_populated(ttm);
                return 0;
        }
 
@@ -1352,19 +1385,10 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address,
                                                 ttm->num_pages);
-               ttm_tt_set_populated(ttm);
                return 0;
        }
 
-#ifdef CONFIG_SWIOTLB
-       if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-               return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
-       }
-#endif
-
-       /* fall back to generic helper to populate the page array
-        * and map them to the device */
-       return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+       return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
 }
 
 /**
@@ -1373,7 +1397,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
  * Unmaps pages of a ttm_tt object from the device address space and
  * unpopulates the page array backing it.
  */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+                                    struct ttm_tt *ttm)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct amdgpu_device *adev;
@@ -1398,16 +1423,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
                return;
 
        adev = amdgpu_ttm_adev(bdev);
-
-#ifdef CONFIG_SWIOTLB
-       if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-               ttm_dma_unpopulate(&gtt->ttm, adev->dev);
-               return;
-       }
-#endif
-
-       /* fall back to generic helper to unmap and unpopulate array */
-       ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+       ttm_pool_free(&adev->mman.bdev.pool, ttm);
 }
 
 /**
@@ -1478,7 +1494,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
        /* Return false if no part of the ttm_tt object lies within
         * the range
         */
-       size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+       size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;
 
@@ -1529,7 +1545,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
        if (mem && mem->mem_type == TTM_PL_TT) {
                flags |= AMDGPU_PTE_SYSTEM;
 
-               if (ttm->caching_state == tt_cached)
+               if (ttm->caching == ttm_cached)
                        flags |= AMDGPU_PTE_SNOOPED;
        }
 
@@ -1699,20 +1715,23 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        return ret;
 }
 
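+/* TTM now only notifies about deleted memory; forward it as a move to NULL */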
+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+       amdgpu_bo_move_notify(bo, false, NULL);
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-       .ttm_tt_bind = &amdgpu_ttm_backend_bind,
-       .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
        .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
        .verify_access = &amdgpu_verify_access,
-       .move_notify = &amdgpu_bo_move_notify,
+       .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
        .release_notify = &amdgpu_bo_release_notify,
-       .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
        .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
        .access_memory = &amdgpu_ttm_access_memory,
@@ -1884,10 +1903,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        mutex_init(&adev->mman.gtt_window_lock);
 
        /* No others user of address space so set it to 0 */
-       r = ttm_bo_device_init(&adev->mman.bdev,
-                              &amdgpu_bo_driver,
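+       /*
+        * ttm_bo_device_init() now also takes the struct device plus
+        * whether coherent DMA allocations and/or DMA32 are required
+        * for its built-in page pool.
+        */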
+       r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
                               adev_to_drm(adev)->anon_inode->i_mapping,
                               adev_to_drm(adev)->vma_offset_manager,
+                              adev->need_swiotlb,
                               dma_addressing_limited(adev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -2092,15 +2111,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
        adev->mman.buffer_funcs_enabled = enable;
 }
 
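+/*
+ * amdgpu_ttm_fault - CPU page fault handler for amdgpu BOs
+ *
+ * Reserve the BO, give the driver a chance to migrate it into CPU
+ * visible VRAM, then let TTM insert the page table entries.
+ */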
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+       struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+       vm_fault_t ret;
+
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_bo_fault_reserve_notify(bo);
+       if (ret)
+               goto unlock;
+
+       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                      TTM_BO_VM_NUM_PREFAULT, 1);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               return ret;
+
+unlock:
+       dma_resv_unlock(bo->base.resv);
+       return ret;
+}
+
+static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
+       .fault = amdgpu_ttm_fault,
+       .open = ttm_bo_vm_open,
+       .close = ttm_bo_vm_close,
+       .access = ttm_bo_vm_access
+};
+
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *file_priv = filp->private_data;
        struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+       int r;
 
-       if (adev == NULL)
-               return -EINVAL;
+       r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+       if (unlikely(r != 0))
+               return r;
 
-       return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
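+       /* override TTM's default vm_ops so amdgpu_ttm_fault() handles faults */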
+       vma->vm_ops = &amdgpu_ttm_vm_ops;
+       return 0;
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
@@ -2284,16 +2336,22 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        return 0;
 }
 
+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct amdgpu_device *adev = drm_to_adev(dev);
+
+       return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
        {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
        {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
        {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
        {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
        {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
-       {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
-       {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+       {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
 };
 
 /**
@@ -2586,12 +2644,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
        }
 
        count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
-       if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
-               --count;
-#endif
-
        return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
 #else
        return 0;
index df110afa97bf46c2c83bf1ba422b58335d8d3f2b..38b59a4fc04c53601a0e78f7ee7f3953edb8546c 100644 (file)
@@ -609,7 +609,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;
 
-       if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+       if (bo->pin_count)
                return;
 
        abo = ttm_to_amdgpu_bo(bo);
@@ -1790,7 +1790,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                resv = vm->root.base.bo->tbo.base.resv;
        } else {
                struct drm_gem_object *obj = &bo->tbo.base;
-               struct ttm_dma_tt *ttm;
 
                resv = bo->tbo.base.resv;
                if (obj->import_attach && bo_va->is_xgmi) {
@@ -1803,10 +1802,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                }
                mem = &bo->tbo.mem;
                nodes = mem->mm_node;
-               if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-                       pages_addr = ttm->dma_address;
-               }
+               if (mem->mem_type == TTM_PL_TT)
+                       pages_addr = bo->tbo.ttm->dma_address;
        }
 
        if (bo) {
index 01c1171afbe02f18208128f37b4e001cfffafc48..7747be644dd08a7310ca0865fe11246e08fc5bf3 100644 (file)
@@ -212,7 +212,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
 
        ttm_resource_manager_set_used(man, false);
 
-       ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+       ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
        if (ret)
                return;
 
index be7c29cec318d13fc45562a1c169d6de98b03b9e..042d7b54a6deae4264620c2d10d934b9982e3210 100644 (file)
@@ -116,7 +116,7 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
        struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
 
@@ -127,7 +127,7 @@ static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
        struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
 
index f33418d6e1a0820f9050531a5f38d28e408c4c63..a4bbf56a7fc120f299c6005724f347f06ee52e64 100644 (file)
@@ -273,8 +273,10 @@ komeda_crtc_do_flush(struct drm_crtc *crtc,
 
 static void
 komeda_crtc_atomic_enable(struct drm_crtc *crtc,
-                         struct drm_crtc_state *old)
+                         struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+                                                                  crtc);
        pm_runtime_get_sync(crtc->dev->dev);
        komeda_crtc_prepare(to_kcrtc(crtc));
        drm_crtc_vblank_on(crtc);
@@ -319,8 +321,10 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
 
 static void
 komeda_crtc_atomic_disable(struct drm_crtc *crtc,
-                          struct drm_crtc_state *old)
+                          struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+                                                                  crtc);
        struct komeda_crtc *kcrtc = to_kcrtc(crtc);
        struct komeda_crtc_state *old_st = to_kcrtc_st(old);
        struct komeda_pipeline *master = kcrtc->master;
index 1d767473ba8a0613fc96d8ae9fae8feedcd0e0c9..1f8195bad536ae81acc98231ddfc38df7f96ef6f 100644 (file)
@@ -41,18 +41,7 @@ static int komeda_register_show(struct seq_file *sf, void *x)
        return 0;
 }
 
-static int komeda_register_open(struct inode *inode, struct file *filp)
-{
-       return single_open(filp, komeda_register_show, inode->i_private);
-}
-
-static const struct file_operations komeda_register_fops = {
-       .owner          = THIS_MODULE,
-       .open           = komeda_register_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(komeda_register);
 
 #ifdef CONFIG_DEBUG_FS
 static void komeda_debugfs_init(struct komeda_dev *mdev)
@@ -261,8 +250,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
                goto disable_clk;
        }
 
-       dev->dma_parms = &mdev->dma_parms;
-       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(dev, U32_MAX);
 
        mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
        if (!mdev->iommu)
index ce27f2f27c240afcdcf836f43deed58fbffbea9f..5b536f0cb5482005b318c11f92610cc0037ccb90 100644 (file)
@@ -163,8 +163,6 @@ struct komeda_dev {
        struct device *dev;
        /** @reg_base: the base address of komeda io space */
        u32 __iomem   *reg_base;
-       /** @dma_parms: the dma parameters of komeda */
-       struct device_dma_parameters dma_parms;
 
        /** @chip: the basic chip information */
        struct komeda_chip_info chip;
index af67fefed38dcb14fcfdda27f100ef3ed2509390..84ac10d59485afd58764cd5d833984b1ac7228f7 100644 (file)
@@ -168,7 +168,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
@@ -179,7 +179,7 @@ static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
index 587d94798f5c2850ac269dd350599bdcf9f603ea..49766eb7a554cb3955100b58baa71c8093337617 100644 (file)
@@ -46,7 +46,7 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
 }
 
 static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
        struct malidp_hw_device *hwdev = malidp->dev;
@@ -70,8 +70,10 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
        struct malidp_hw_device *hwdev = malidp->dev;
        int err;
index a887b6a5f8bd7497953656795988fe71a40602d3..e0fbfc9ce386cabee9eb588a3667d60a3836ae56 100644 (file)
@@ -467,8 +467,10 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct drm_pending_vblank_event *event;
 
@@ -503,8 +505,10 @@ static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                         struct drm_crtc_state *old_state)
+                                         struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
index 980d3f1f8f16e965b224135ebfe6fcaaa3cfdd49..22247cfce80b354708d51f2b87672e236195fdc6 100644 (file)
@@ -37,13 +37,10 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops);
 
 static struct drm_driver armada_drm_driver = {
        .lastclose              = drm_fb_helper_lastclose,
-       .gem_free_object_unlocked = armada_gem_free_object,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
-       .gem_prime_export       = armada_gem_prime_export,
        .gem_prime_import       = armada_gem_prime_import,
        .dumb_create            = armada_gem_dumb_create,
-       .gem_vm_ops             = &armada_gem_vm_ops,
        .major                  = 1,
        .minor                  = 0,
        .name                   = "armada-drm",
index 6654bccd9466589b86b84808c1fd681a185b803c..21909642ee4ca9ec3d4529afca3ff5289018dc08 100644 (file)
@@ -25,7 +25,7 @@ static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 }
 
-const struct vm_operations_struct armada_gem_vm_ops = {
+static const struct vm_operations_struct armada_gem_vm_ops = {
        .fault  = armada_gem_vm_fault,
        .open   = drm_gem_vm_open,
        .close  = drm_gem_vm_close,
@@ -184,6 +184,12 @@ armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
        return dobj->addr;
 }
 
+static const struct drm_gem_object_funcs armada_gem_object_funcs = {
+       .free = armada_gem_free_object,
+       .export = armada_gem_prime_export,
+       .vm_ops = &armada_gem_vm_ops,
+};
+
 struct armada_gem_object *
 armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
 {
@@ -195,6 +201,8 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
        if (!obj)
                return NULL;
 
+       obj->obj.funcs = &armada_gem_object_funcs;
+
        drm_gem_private_object_init(dev, &obj->obj, size);
 
        DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -214,6 +222,8 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
        if (!obj)
                return NULL;
 
+       obj->obj.funcs = &armada_gem_object_funcs;
+
        if (drm_gem_object_init(dev, &obj->obj, size)) {
                kfree(obj);
                return NULL;
index de04cc2c8f0e79fbdda95afc8e95c8acac1db46f..ffcc7e8dd351a3f563cffd15419390c04ff0a08d 100644 (file)
@@ -21,8 +21,6 @@ struct armada_gem_object {
        void                    *update_data;
 };
 
-extern const struct vm_operations_struct armada_gem_vm_ops;
-
 #define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
 
 void armada_gem_free_object(struct drm_gem_object *);
index 018383cfcfa7907e7503e157a450fd7d8245dfc9..5e95bcea43e92cb0e83ea68866a11358e47ae3b3 100644 (file)
@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX
        tristate "ASPEED BMC Display Controller"
        depends on DRM && OF
        depends on (COMPILE_TEST || ARCH_ASPEED)
+       depends on MMU
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DMA_CMA if HAVE_DMA_CONTIGUOUS
index e7ca95827ae89658870223337887adda545b51e9..f1e7e56abc02dda9bdbd7169eb76bde40df9e070 100644 (file)
@@ -75,7 +75,7 @@ int aspeed_gfx_create_output(struct drm_device *drm);
 /* CTRL2 */
 #define CRT_CTRL_DAC_EN                        BIT(0)
 #define CRT_CTRL_VBLANK_LINE(x)                (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
-#define CRT_CTRL_VBLANK_LINE_MASK      GENMASK(20, 31)
+#define CRT_CTRL_VBLANK_LINE_MASK      GENMASK(31, 20)
 
 /* CRT_HORIZ0 */
 #define CRT_H_TOTAL(x)                 (x)
index 2b424b2b85cc8174480843756914e7042fa6b6b0..771ad71cd340a10f6d5ac90c1e891e878ea61c2c 100644 (file)
@@ -193,12 +193,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
 
 static struct drm_driver aspeed_gfx_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
-       .gem_create_object      = drm_gem_cma_create_object_default_funcs,
-       .dumb_create            = drm_gem_cma_dumb_create,
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
-       .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
+       DRM_GEM_CMA_DRIVER_OPS,
        .fops = &fops,
        .name = "aspeed-gfx-drm",
        .desc = "ASPEED GFX DRM",
@@ -212,6 +207,69 @@ static const struct of_device_id aspeed_gfx_match[] = {
        { }
 };
 
+#define ASPEED_SCU_VGA0                0x50
+#define ASPEED_SCU_MISC_CTRL   0x2c
+
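+/*
+ * Bits [17:16] of the SCU misc control register appear to select which
+ * source drives the VGA DAC mux; four inputs, hence the val > 3 check
+ * below.
+ */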
+static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct aspeed_gfx *priv = dev_get_drvdata(dev);
+       u32 val;
+       int rc;
+
+       rc = kstrtou32(buf, 0, &val);
+       if (rc)
+               return rc;
+
+       if (val > 3)
+               return -EINVAL;
+
+       rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
+       if (rc < 0)
+               return rc;
+
+       return count;
+}
+
+static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct aspeed_gfx *priv = dev_get_drvdata(dev);
+       u32 reg;
+       int rc;
+
+       rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
+       if (rc)
+               return rc;
+
+       return sprintf(buf, "%u\n", (reg >> 16) & 0x3);
+}
+static DEVICE_ATTR_RW(dac_mux);
+
+static ssize_t
+vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct aspeed_gfx *priv = dev_get_drvdata(dev);
+       u32 reg;
+       int rc;
+
+       rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
+       if (rc)
+               return rc;
+
+       return sprintf(buf, "%u\n", reg & 1);
+}
+static DEVICE_ATTR_RO(vga_pw);
+
+static struct attribute *aspeed_sysfs_entries[] = {
+       &dev_attr_vga_pw.attr,
+       &dev_attr_dac_mux.attr,
+       NULL,
+};
+
+static struct attribute_group aspeed_sysfs_attr_group = {
+       .attrs = aspeed_sysfs_entries,
+};
+
 static int aspeed_gfx_probe(struct platform_device *pdev)
 {
        struct aspeed_gfx *priv;
@@ -226,6 +284,12 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       dev_set_drvdata(&pdev->dev, priv);
+
+       ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+       if (ret)
+               return ret;
+
        ret = drm_dev_register(&priv->drm, 0);
        if (ret)
                goto err_unload;
@@ -234,6 +298,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
        return 0;
 
 err_unload:
+       sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
        aspeed_gfx_unload(&priv->drm);
 
        return ret;
@@ -243,6 +308,7 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
 
+       sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
        drm_dev_unregister(drm);
        aspeed_gfx_unload(drm);
 
index 834a156e3a7502b0626e983c4483f2ef0e3df3f5..bd03a8a67e3ab489937b63d819ada883d59e20f6 100644 (file)
@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
        case DRM_MODE_DPMS_SUSPEND:
                if (ast->tx_chip_type == AST_TX_DP501)
                        ast_set_dp501_video_output(crtc->dev, 1);
-               ast_crtc_load_lut(ast, crtc);
                break;
        case DRM_MODE_DPMS_OFF:
                if (ast->tx_chip_type == AST_TX_DP501)
@@ -777,9 +776,24 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
+static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+       struct ast_private *ast = to_ast_private(crtc->dev);
+       struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+       struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+       /*
+        * The gamma LUT has to be reloaded after changing the primary
+        * plane's color format.
+        */
+       if (old_ast_crtc_state->format != ast_crtc_state->format)
+               ast_crtc_load_lut(ast, crtc);
+}
+
 static void
 ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
-                             struct drm_crtc_state *old_crtc_state)
+                             struct drm_atomic_state *state)
 {
        struct drm_device *dev = crtc->dev;
        struct ast_private *ast = to_ast_private(dev);
@@ -802,8 +816,10 @@ ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 
 static void
 ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
-                              struct drm_crtc_state *old_crtc_state)
+                              struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct drm_device *dev = crtc->dev;
        struct ast_private *ast = to_ast_private(dev);
 
@@ -830,6 +846,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
        .atomic_check = ast_crtc_helper_atomic_check,
+       .atomic_flush = ast_crtc_helper_atomic_flush,
        .atomic_enable = ast_crtc_helper_atomic_enable,
        .atomic_disable = ast_crtc_helper_atomic_disable,
 };
index ce246b96330b772ab7f7388b0ee1ce66737f8d49..2b3888df22f8970900a24c52f85716acafc69c2b 100644 (file)
@@ -165,7 +165,7 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
 }
 
 static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
-                                           struct drm_crtc_state *old_state)
+                                           struct drm_atomic_state *state)
 {
        struct drm_device *dev = c->dev;
        struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -200,7 +200,7 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
 }
 
 static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
        struct drm_device *dev = c->dev;
        struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
index f101dd2819b5290dbb2992c3d04aea0432c14182..45838bd08d37ecc5b413c66958af2a568fdb4071 100644 (file)
@@ -55,9 +55,9 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
        return 0;
 }
 
-int adv7511_hdmi_hw_params(struct device *dev, void *data,
-                          struct hdmi_codec_daifmt *fmt,
-                          struct hdmi_codec_params *hparms)
+static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+                                 struct hdmi_codec_daifmt *fmt,
+                                 struct hdmi_codec_params *hparms)
 {
        struct adv7511 *adv7511 = dev_get_drvdata(dev);
        unsigned int audio_source, i2s_format = 0;
index e1fa7d8203739317c42755255516ec1a08b2f9b6..024ea2a570e7442fdbc7c07ad24f83369ce3adff 100644 (file)
@@ -25,3 +25,12 @@ config DRM_ANALOGIX_ANX78XX
 config DRM_ANALOGIX_DP
        tristate
        depends on DRM
+
+config DRM_ANALOGIX_ANX7625
+       tristate "Analogix Anx7625 MIPI to DP interface support"
+       depends on DRM
+       depends on OF
+       help
+         ANX7625 is an ultra-low power 4K mobile HD transmitter
+         designed for portable devices. It converts MIPI/DPI to
+         DisplayPort 1.3 4K.
index 97669b374098ceabbda5d22b95962f41bcfc833b..44da392bb9f9de033f646541b4a835526cba4944 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
 obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += anx7625.o
 obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
index 914c569ab8c15d10e58e3194f7b4b921816cdb93..fafb4b492ea047666afc30c11ebfbe1aaf85c3c9 100644 (file)
@@ -524,7 +524,7 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
        writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
 }
 
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
 {
        int reg;
        int retval = 0;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
new file mode 100644 (file)
index 0000000..65cc059
--- /dev/null
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+#include <linux/gcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+
+#include "anx7625.h"
+
+/*
+ * There is a sync issue when the AP (CPU) and the internal firmware (OCM)
+ * access I2C registers concurrently. To avoid the race condition, the AP
+ * should access the reserved slave address before the slave address changes.
+ */
+static int i2c_access_workaround(struct anx7625_data *ctx,
+                                struct i2c_client *client)
+{
+       u8 offset;
+       struct device *dev = &client->dev;
+       int ret;
+
+       if (client == ctx->last_client)
+               return 0;
+
+       ctx->last_client = client;
+
+       if (client == ctx->i2c.tcpc_client)
+               offset = RSVD_00_ADDR;
+       else if (client == ctx->i2c.tx_p0_client)
+               offset = RSVD_D1_ADDR;
+       else if (client == ctx->i2c.tx_p1_client)
+               offset = RSVD_60_ADDR;
+       else if (client == ctx->i2c.rx_p0_client)
+               offset = RSVD_39_ADDR;
+       else if (client == ctx->i2c.rx_p1_client)
+               offset = RSVD_7F_ADDR;
+       else
+               offset = RSVD_00_ADDR;
+
+       ret = i2c_smbus_write_byte_data(client, offset, 0x00);
+       if (ret < 0)
+               DRM_DEV_ERROR(dev,
+                             "failed to access i2c id=%x:%x\n",
+                             client->addr, offset);
+
+       return ret;
+}
+
+static int anx7625_reg_read(struct anx7625_data *ctx,
+                           struct i2c_client *client, u8 reg_addr)
+{
+       int ret;
+       struct device *dev = &client->dev;
+
+       i2c_access_workaround(ctx, client);
+
+       ret = i2c_smbus_read_byte_data(client, reg_addr);
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "failed to read i2c id=%x:%x\n",
+                             client->addr, reg_addr);
+
+       return ret;
+}
+
+static int anx7625_reg_block_read(struct anx7625_data *ctx,
+                                 struct i2c_client *client,
+                                 u8 reg_addr, u8 len, u8 *buf)
+{
+       int ret;
+       struct device *dev = &client->dev;
+
+       i2c_access_workaround(ctx, client);
+
+       ret = i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "failed to read i2c block id=%x:%x\n",
+                             client->addr, reg_addr);
+
+       return ret;
+}
+
+static int anx7625_reg_write(struct anx7625_data *ctx,
+                            struct i2c_client *client,
+                            u8 reg_addr, u8 reg_val)
+{
+       int ret;
+       struct device *dev = &client->dev;
+
+       i2c_access_workaround(ctx, client);
+
+       ret = i2c_smbus_write_byte_data(client, reg_addr, reg_val);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "failed to write i2c id=%x:%x\n",
+                             client->addr, reg_addr);
+
+       return ret;
+}
+
+static int anx7625_write_or(struct anx7625_data *ctx,
+                           struct i2c_client *client,
+                           u8 offset, u8 mask)
+{
+       int val;
+
+       val = anx7625_reg_read(ctx, client, offset);
+       if (val < 0)
+               return val;
+
+       return anx7625_reg_write(ctx, client, offset, (val | (mask)));
+}
+
+static int anx7625_write_and(struct anx7625_data *ctx,
+                            struct i2c_client *client,
+                            u8 offset, u8 mask)
+{
+       int val;
+
+       val = anx7625_reg_read(ctx, client, offset);
+       if (val < 0)
+               return val;
+
+       return anx7625_reg_write(ctx, client, offset, (val & (mask)));
+}
+
+static int anx7625_write_and_or(struct anx7625_data *ctx,
+                               struct i2c_client *client,
+                               u8 offset, u8 and_mask, u8 or_mask)
+{
+       int val;
+
+       val = anx7625_reg_read(ctx, client, offset);
+       if (val < 0)
+               return val;
+
+       return anx7625_reg_write(ctx, client,
+                                offset, (val & and_mask) | (or_mask));
+}
+
+static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
+{
+       return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS);
+}
+
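+/*
+ * Poll the AUX control/status register until the OP_EN bit clears (or
+ * an I2C error is reported), then check the low status nibble for AUX
+ * transaction errors.
+ */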
+static int wait_aux_op_finish(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       int val;
+       int ret;
+
+       ret = readx_poll_timeout(anx7625_read_ctrl_status_p0,
+                                ctx, val,
+                                (!(val & AP_AUX_CTRL_OP_EN) || (val < 0)),
+                                2000,
+                                2000 * 150);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "aux operation failed!\n");
+               return -EIO;
+       }
+
+       val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+                              AP_AUX_CTRL_STATUS);
+       if (val < 0 || (val & 0x0F)) {
+               DRM_DEV_ERROR(dev, "aux status %02x\n", val);
+               val = -EIO;
+       }
+
+       return val;
+}
+
+static int anx7625_video_mute_control(struct anx7625_data *ctx,
+                                     u8 status)
+{
+       int ret;
+
+       if (status) {
+               /* Set mute on flag */
+               ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                                      AP_AV_STATUS, AP_MIPI_MUTE);
+               /* Clear mipi RX en */
+               ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+                                        AP_AV_STATUS, (u8)~AP_MIPI_RX_EN);
+       } else {
+               /* Mute off flag */
+               ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+                                       AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+               /* Set MIPI RX EN */
+               ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                                       AP_AV_STATUS, AP_MIPI_RX_EN);
+       }
+
+       return ret;
+}
+
+static int anx7625_config_audio_input(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       int ret;
+
+       /* Channel num */
+       ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+                               AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5);
+
+       /* FS */
+       ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+                                   AUDIO_CHANNEL_STATUS_4,
+                                   0xf0, AUDIO_FS_48K);
+       /* Word length */
+       ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+                                   AUDIO_CHANNEL_STATUS_5,
+                                   0xf0, AUDIO_W_LEN_24_24MAX);
+       /* I2S */
+       ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+                               AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE);
+       ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
+                                AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE);
+       /* Audio change flag */
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                               AP_AV_STATUS, AP_AUDIO_CHG);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "failed to configure audio.\n");
+
+       return ret;
+}
+
+/* Reduction of fraction a/b */
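+/*
+ * For example: m/n = 148500000/13500000 reduces to 11/1 via the GCD,
+ * and both are then doubled while they stay below 2^24 - 1 so that as
+ * many significant bits as possible survive the register programming.
+ */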
+static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b)
+{
+       unsigned long gcd_num;
+       unsigned long tmp_a, tmp_b;
+       u32 i = 1;
+
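+       /* choose the page caching mode from the BO's USWC flag */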
+       gcd_num = gcd(*a, *b);
+       *a /= gcd_num;
+       *b /= gcd_num;
+
+       tmp_a = *a;
+       tmp_b = *b;
+
+       while ((*a > MAX_UNSIGNED_24BIT) || (*b > MAX_UNSIGNED_24BIT)) {
+               i++;
+               *a = tmp_a / i;
+               *b = tmp_b / i;
+       }
+
+       /*
+        * In the end, make a, b larger to have higher ODFC PLL
+        * output frequency accuracy
+        */
+       while ((*a < MAX_UNSIGNED_24BIT) && (*b < MAX_UNSIGNED_24BIT)) {
+               *a <<= 1;
+               *b <<= 1;
+       }
+
+       *a >>= 1;
+       *b >>= 1;
+}
+
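+/*
+ * Pick M, N and a post divider such that, roughly,
+ * pixelclock = XTAL_FRQ * M / (N * post_divider); M is seeded with the
+ * pixel clock and N with XTAL_FRQ / post_divider before reduction.
+ */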
+static int anx7625_calculate_m_n(u32 pixelclock,
+                                unsigned long *m,
+                                unsigned long *n,
+                                u8 *post_divider)
+{
+       if (pixelclock > PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN) {
+               /* Pixel clock frequency is too high */
+               DRM_ERROR("pixelclock too high, act(%u), maximum(%lu)\n",
+                         pixelclock,
+                         PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN);
+               return -EINVAL;
+       }
+
+       if (pixelclock < PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX) {
+               /* Pixel clock frequency is too low */
+               DRM_ERROR("pixelclock too low, act(%u), minimum(%lu)\n",
+                         pixelclock,
+                         PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX);
+               return -EINVAL;
+       }
+
+       for (*post_divider = 1;
+               pixelclock < (PLL_OUT_FREQ_MIN / (*post_divider));)
+               *post_divider += 1;
+
+       if (*post_divider > POST_DIVIDER_MAX) {
+               for (*post_divider = 1;
+                       (pixelclock <
+                        (PLL_OUT_FREQ_ABS_MIN / (*post_divider)));)
+                       *post_divider += 1;
+
+               if (*post_divider > POST_DIVIDER_MAX) {
+                       DRM_ERROR("cannot find a proper post_divider(%d)\n",
+                                 *post_divider);
+                       return -EDOM;
+               }
+       }
+
+       /* Patch to improve the accuracy */
+       if (*post_divider == 7) {
+               /* 27,000,000 is not divisible by 7 */
+               *post_divider = 8;
+       } else if (*post_divider == 11) {
+               /* 27,000,000 is not divisible by 11 */
+               *post_divider = 12;
+       } else if ((*post_divider == 13) || (*post_divider == 14)) {
+               /* 27,000,000 is not divisible by 13 or 14 */
+               *post_divider = 15;
+       }
+
+       if (pixelclock * (*post_divider) > PLL_OUT_FREQ_ABS_MAX) {
+               DRM_ERROR("actual clock (%u) larger than maximum (%lu)\n",
+                         pixelclock * (*post_divider),
+                         PLL_OUT_FREQ_ABS_MAX);
+               return -EDOM;
+       }
+
+       *m = pixelclock;
+       *n = XTAL_FRQ / (*post_divider);
+
+       anx7625_reduction_of_a_fraction(m, n);
+
+       return 0;
+}
+
+static int anx7625_odfc_config(struct anx7625_data *ctx,
+                              u8 post_divider)
+{
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       /* Config input reference clock frequency 27MHz/19.2MHz */
+       ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+                               ~(REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+                               (REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+       /* Post divider */
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+                                MIPI_DIGITAL_PLL_8, 0x0f);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8,
+                               post_divider << 4);
+
+       /* Add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test) */
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+                                ~MIPI_PLL_VCO_TUNE_REG_VAL);
+
+       /* Reset ODFC PLL */
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+                                ~MIPI_PLL_RESET_N);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+                               MIPI_PLL_RESET_N);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "IO error.\n");
+
+       return ret;
+}
+
+static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       unsigned long m, n;
+       u16 htotal;
+       int ret;
+       u8 post_divider = 0;
+
+       ret = anx7625_calculate_m_n(ctx->dt.pixelclock.min * 1000,
+                                   &m, &n, &post_divider);
+
+       if (ret) {
+               DRM_DEV_ERROR(dev, "cannot get proper m/n values.\n");
+               return ret;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "compute M(%lu), N(%lu), divider(%d).\n",
+                            m, n, post_divider);
+
+       /* Configure pixel clock */
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L,
+                               (ctx->dt.pixelclock.min / 1000) & 0xFF);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H,
+                                (ctx->dt.pixelclock.min / 1000) >> 8);
+       /* Lane count */
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_LANE_CTRL_0, 0xfc);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client,
+                               MIPI_LANE_CTRL_0, 3);
+
+       /* Htotal */
+       htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min +
+               ctx->dt.hback_porch.min + ctx->dt.hsync_len.min;
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_TOTAL_PIXELS_L, htotal & 0xFF);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_TOTAL_PIXELS_H, htotal >> 8);
+       /* Hactive */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_ACTIVE_PIXELS_L, ctx->dt.hactive.min & 0xFF);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_ACTIVE_PIXELS_H, ctx->dt.hactive.min >> 8);
+       /* HFP */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_FRONT_PORCH_L, ctx->dt.hfront_porch.min);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_FRONT_PORCH_H,
+                       ctx->dt.hfront_porch.min >> 8);
+       /* HWS */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_SYNC_WIDTH_L, ctx->dt.hsync_len.min);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_SYNC_WIDTH_H, ctx->dt.hsync_len.min >> 8);
+       /* HBP */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_BACK_PORCH_L, ctx->dt.hback_porch.min);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       HORIZONTAL_BACK_PORCH_H, ctx->dt.hback_porch.min >> 8);
+       /* Vactive */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_L,
+                       ctx->dt.vactive.min);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_H,
+                       ctx->dt.vactive.min >> 8);
+       /* VFP */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       VERTICAL_FRONT_PORCH, ctx->dt.vfront_porch.min);
+       /* VWS */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       VERTICAL_SYNC_WIDTH, ctx->dt.vsync_len.min);
+       /* VBP */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+                       VERTICAL_BACK_PORCH, ctx->dt.vback_porch.min);
+       /* M value */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_PLL_M_NUM_23_16, (m >> 16) & 0xff);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_PLL_M_NUM_15_8, (m >> 8) & 0xff);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_PLL_M_NUM_7_0, (m & 0xff));
+       /* N value */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_PLL_N_NUM_23_16, (n >> 16) & 0xff);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0,
+                       (n & 0xff));
+       /* Diff */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                       MIPI_DIGITAL_ADJ_1, 0x3D);
+
+       ret |= anx7625_odfc_config(ctx, post_divider - 1);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "mipi dsi setup IO error.\n");
+
+       return ret;
+}
+
+static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx)
+{
+       int val;
+       struct device *dev = &ctx->client->dev;
+
+       /* Swap MIPI-DSI data lane 3 P and N */
+       val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP);
+       if (val < 0) {
+               DRM_DEV_ERROR(dev, "IO error : access MIPI_SWAP.\n");
+               return -EIO;
+       }
+
+       val |= (1 << MIPI_SWAP_CH3);
+       return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP, val);
+}
+
+static int anx7625_api_dsi_config(struct anx7625_data *ctx)
+{
+       int val, ret;
+       struct device *dev = &ctx->client->dev;
+
+       /* Swap MIPI-DSI data lane 3 P and N */
+       ret = anx7625_swap_dsi_lane3(ctx);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "IO error : swap dsi lane 3 fail.\n");
+               return ret;
+       }
+
+       /* DSI clock settings */
+       val = (0 << MIPI_HS_PWD_CLK)            |
+               (0 << MIPI_HS_RT_CLK)           |
+               (0 << MIPI_PD_CLK)              |
+               (1 << MIPI_CLK_RT_MANUAL_PD_EN) |
+               (1 << MIPI_CLK_HS_MANUAL_PD_EN) |
+               (0 << MIPI_CLK_DET_DET_BYPASS)  |
+               (0 << MIPI_CLK_MISS_CTRL)       |
+               (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN);
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                               MIPI_PHY_CONTROL_3, val);
+
+       /*
+        * Decrease the HS prepare timing delay from 160 ns to 80 ns
+        * (5 ns/step) to work with:
+        *     a) Dragon board 810 series (Qualcomm AP)
+        *     b) Moving Pixel DSI source (PG3A pattern generator +
+        *        P332 D-PHY probe) at default D-PHY timing
+        */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                                MIPI_TIME_HS_PRPR, 0x10);
+
+       /* Enable DSI mode */
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18,
+                               SELECT_DSI << MIPI_DPI_SELECT);
+
+       ret |= anx7625_dsi_video_timing_config(ctx);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "dsi video timing config fail\n");
+               return ret;
+       }
+
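+       /*
+        * The PLL is assumed to latch the freshly written M/N values on
+        * the rising edge of the "ready" bits, hence the clear/set
+        * sequence below.
+        */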
+       /* Toggle m, n ready */
+       ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+                               ~(MIPI_M_NUM_READY | MIPI_N_NUM_READY));
+       usleep_range(1000, 1100);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+                               MIPI_M_NUM_READY | MIPI_N_NUM_READY);
+
+       /* Configure integer stable register */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                                MIPI_VIDEO_STABLE_CNT, 0x02);
+       /* Power on MIPI RX */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                                MIPI_LANE_CTRL_10, 0x00);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+                                MIPI_LANE_CTRL_10, 0x80);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "IO error : mipi dsi enable init fail.\n");
+
+       return ret;
+}
+
+static int anx7625_dsi_config(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       int ret;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n");
+
+       /* DSC disable */
+       ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+                               R_DSC_CTRL_0, ~DSC_EN);
+
+       ret |= anx7625_api_dsi_config(ctx);
+
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "IO error : api dsi config error.\n");
+               return ret;
+       }
+
+       /* Set MIPI RX EN */
+       ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                              AP_AV_STATUS, AP_MIPI_RX_EN);
+       /* Clear mute flag */
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+                                AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "IO error : enable mipi rx fail.\n");
+       else
+               DRM_DEV_DEBUG_DRIVER(dev, "success to config DSI\n");
+
+       return ret;
+}
+
+static void anx7625_dp_start(struct anx7625_data *ctx)
+{
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       if (!ctx->display_timing_valid) {
+               DRM_DEV_ERROR(dev, "mipi not set display timing yet.\n");
+               return;
+       }
+
+       anx7625_config_audio_input(ctx);
+
+       ret = anx7625_dsi_config(ctx);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");
+}
+
+static void anx7625_dp_stop(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       int ret;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");
+
+       /*
+        * Video disable: 0x72:08 bit 7 = 0;
+        * Audio disable: 0x70:87 bit 0 = 0;
+        */
+       ret = anx7625_write_and(ctx, ctx->i2c.tx_p0_client, 0x87, 0xfe);
+       ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);
+
+       ret |= anx7625_video_mute_control(ctx, 1);
+       if (ret < 0)
+               DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+}
+
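+/* Pulse the AUX_RST bit to reset the DP AUX channel. */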
+static int sp_tx_rst_aux(struct anx7625_data *ctx)
+{
+       int ret;
+
+       ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+                              AUX_RST);
+       ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+                                ~AUX_RST);
+       return ret;
+}
+
+static int sp_tx_aux_wr(struct anx7625_data *ctx, u8 offset)
+{
+       int ret;
+
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_BUFF_START, offset);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_COMMAND, 0x04);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+       return (ret | wait_aux_op_finish(ctx));
+}
+
+static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
+{
+       int ret;
+
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_COMMAND, len_cmd);
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+       return (ret | wait_aux_op_finish(ctx));
+}
+
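+/*
+ * Read byte 126 (offset 0x7e) of the base EDID block - the extension
+ * flag - to learn how many extension blocks follow.
+ */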
+static int sp_tx_get_edid_block(struct anx7625_data *ctx)
+{
+       int c = 0;
+       struct device *dev = &ctx->client->dev;
+
+       sp_tx_aux_wr(ctx, 0x7e);
+       sp_tx_aux_rd(ctx, 0x01);
+       c = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START);
+       if (c < 0) {
+               DRM_DEV_ERROR(dev, "IO error : access AUX BUFF.\n");
+               return -EIO;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, " EDID Block = %d\n", c + 1);
+
+       if (c > MAX_EDID_BLOCK)
+               c = 1;
+
+       return c;
+}
+
+static int edid_read(struct anx7625_data *ctx,
+                    u8 offset, u8 *pblock_buf)
+{
+       int ret, cnt;
+       struct device *dev = &ctx->client->dev;
+
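+       /*
+        * Retry loop: write the EDID offset over AUX, then issue an
+        * I2C-over-AUX read; 0xf1 is assumed to encode command 0x01
+        * (I2C read, MOT = 0) in the low nibble and a 16-byte length
+        * (0xf + 1) in the high nibble.
+        */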
+       for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+               sp_tx_aux_wr(ctx, offset);
+               /* Set I2C read com 0x01 mot = 0 and read 16 bytes */
+               ret = sp_tx_aux_rd(ctx, 0xf1);
+
+               if (ret) {
+                       sp_tx_rst_aux(ctx);
+                       DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
+               } else {
+                       ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+                                                    AP_AUX_BUFF_START,
+                                                    MAX_DPCD_BUFFER_SIZE,
+                                                    pblock_buf);
+                       if (ret > 0)
+                               break;
+               }
+       }
+
+       if (cnt > EDID_TRY_CNT)
+               return -EIO;
+
+       return 0;
+}
+
+static int segments_edid_read(struct anx7625_data *ctx,
+                             u8 segment, u8 *buf, u8 offset)
+{
+       u8 cnt;
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       /* Write address only */
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_ADDR_7_0, 0x30);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_COMMAND, 0x04);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_CTRL_STATUS,
+                                AP_AUX_CTRL_ADDRONLY | AP_AUX_CTRL_OP_EN);
+
+       ret |= wait_aux_op_finish(ctx);
+       /* Write segment address */
+       ret |= sp_tx_aux_wr(ctx, segment);
+       /* Data read */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_ADDR_7_0, 0x50);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "IO error : aux initial fail.\n");
+               return ret;
+       }
+
+       for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+               sp_tx_aux_wr(ctx, offset);
+               /* Set I2C read com 0x01 mot = 0 and read 16 bytes */
+               ret = sp_tx_aux_rd(ctx, 0xf1);
+
+               if (ret) {
+                       ret = sp_tx_rst_aux(ctx);
+                       DRM_DEV_ERROR(dev, "segment read fail, reset!\n");
+               } else {
+                       ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+                                                    AP_AUX_BUFF_START,
+                                                    MAX_DPCD_BUFFER_SIZE, buf);
+                       if (ret > 0)
+                               break;
+               }
+       }
+
+       if (cnt > EDID_TRY_CNT)
+               return -EIO;
+
+       return 0;
+}
+
+static int sp_tx_edid_read(struct anx7625_data *ctx,
+                          u8 *pedid_blocks_buf)
+{
+       u8 offset, edid_pos;
+       int count, blocks_num;
+       u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
+       u8 i, j;
+       int g_edid_break = 0;
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       /* Address initial */
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                               AP_AUX_ADDR_7_0, 0x50);
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_ADDR_15_8, 0);
+       ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+                                AP_AUX_ADDR_19_16, 0xf0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "access aux channel IO error.\n");
+               return -EIO;
+       }
+
+       blocks_num = sp_tx_get_edid_block(ctx);
+       if (blocks_num < 0)
+               return blocks_num;
+
+       count = 0;
+
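+       /*
+        * Blocks 0 and 1 live in E-DDC segment 0 and are fetched with
+        * plain offset reads; blocks 2 and 3 live in segment 1, which
+        * segments_edid_read() selects through the segment pointer at
+        * I2C address 0x30.
+        */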
+       do {
+               switch (count) {
+               case 0:
+               case 1:
+                       for (i = 0; i < 8; i++) {
+                               offset = (i + count * 8) * MAX_DPCD_BUFFER_SIZE;
+                               g_edid_break = edid_read(ctx, offset,
+                                                        pblock_buf);
+
+                               if (g_edid_break)
+                                       break;
+
+                               memcpy(&pedid_blocks_buf[offset],
+                                      pblock_buf,
+                                      MAX_DPCD_BUFFER_SIZE);
+                       }
+
+                       break;
+               case 2:
+                       offset = 0x00;
+
+                       for (j = 0; j < 8; j++) {
+                               edid_pos = (j + count * 8) *
+                                       MAX_DPCD_BUFFER_SIZE;
+
+                               if (g_edid_break)
+                                       break;
+
+                               segments_edid_read(ctx, count / 2,
+                                                  pblock_buf, offset);
+                               memcpy(&pedid_blocks_buf[edid_pos],
+                                      pblock_buf,
+                                      MAX_DPCD_BUFFER_SIZE);
+                               offset = offset + 0x10;
+                       }
+
+                       break;
+               case 3:
+                       offset = 0x80;
+
+                       for (j = 0; j < 8; j++) {
+                               edid_pos = (j + count * 8) *
+                                       MAX_DPCD_BUFFER_SIZE;
+                               if (g_edid_break)
+                                       break;
+
+                               segments_edid_read(ctx, count / 2,
+                                                  pblock_buf, offset);
+                               memcpy(&pedid_blocks_buf[edid_pos],
+                                      pblock_buf,
+                                      MAX_DPCD_BUFFER_SIZE);
+                               offset = offset + 0x10;
+                       }
+
+                       break;
+               default:
+                       break;
+               }
+
+               count++;
+
+       } while (blocks_num >= count);
+
+       /* Check edid data */
+       if (!drm_edid_is_valid((struct edid *)pedid_blocks_buf)) {
+               DRM_DEV_ERROR(dev, "WARNING! edid check fail!\n");
+               return -EINVAL;
+       }
+
+       /* Reset aux channel */
+       sp_tx_rst_aux(ctx);
+
+       return (blocks_num + 1);
+}
+
+static void anx7625_power_on(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+
+       if (!ctx->pdata.low_power_mode) {
+               DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+               return;
+       }
+
+       /* Power on pin enable */
+       gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+       usleep_range(10000, 11000);
+       /* Power reset pin enable */
+       gpiod_set_value(ctx->pdata.gpio_reset, 1);
+       usleep_range(10000, 11000);
+
+       DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
+}
+
+static void anx7625_power_standby(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+
+       if (!ctx->pdata.low_power_mode) {
+               DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+               return;
+       }
+
+       gpiod_set_value(ctx->pdata.gpio_reset, 0);
+       usleep_range(1000, 1100);
+       gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+       usleep_range(1000, 1100);
+       DRM_DEV_DEBUG_DRIVER(dev, "power down\n");
+}
+
+/* Basic configurations of ANX7625 */
+static void anx7625_config(struct anx7625_data *ctx)
+{
+       anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                         XTAL_FRQ_SEL, XTAL_FRQ_27M);
+}
+
+static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       int ret;
+
+       /* Reset main ocm */
+       ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
+       /* Disable PD */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                AP_AV_STATUS, AP_DISABLE_PD);
+       /* Release main ocm */
+       ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x00);
+
+       if (ret < 0)
+               DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
+       else
+               DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+}
+
+static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
+{
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       /* Check interface workable */
+       ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+                              FLASH_LOAD_STA);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "IO error : access flash load.\n");
+               return ret;
+       }
+       if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
+               return -ENODEV;
+
+       anx7625_disable_pd_protocol(ctx);
+
+       DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,",
+                            anx7625_reg_read(ctx,
+                                             ctx->i2c.rx_p0_client,
+                                             OCM_FW_VERSION),
+                            anx7625_reg_read(ctx,
+                                             ctx->i2c.rx_p0_client,
+                                             OCM_FW_REVERSION));
+       DRM_DEV_DEBUG_DRIVER(dev, "Driver version %s\n",
+                            ANX7625_DRV_VERSION);
+
+       return 0;
+}
+
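+/*
+ * Power the chip up and wait for the on-chip microcontroller (OCM)
+ * firmware to finish loading from flash, retrying the full power
+ * cycle up to three times.
+ */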
+static void anx7625_power_on_init(struct anx7625_data *ctx)
+{
+       int retry_count, i;
+
+       for (retry_count = 0; retry_count < 3; retry_count++) {
+               anx7625_power_on(ctx);
+               anx7625_config(ctx);
+
+               for (i = 0; i < OCM_LOADING_TIME; i++) {
+                       if (!anx7625_ocm_loading_check(ctx))
+                               return;
+                       usleep_range(1000, 1100);
+               }
+               anx7625_power_standby(ctx);
+       }
+}
+
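+/*
+ * power_status acts as a reference count: a non-zero state takes a
+ * reference (powering the chip up on the first one), a zero state
+ * drops one (powering the chip down when the last is gone).
+ */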
+static void anx7625_chip_control(struct anx7625_data *ctx, int state)
+{
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "before set, power_state(%d).\n",
+                            atomic_read(&ctx->power_status));
+
+       if (!ctx->pdata.low_power_mode)
+               return;
+
+       if (state) {
+               atomic_inc(&ctx->power_status);
+               if (atomic_read(&ctx->power_status) == 1)
+                       anx7625_power_on_init(ctx);
+       } else {
+               if (atomic_read(&ctx->power_status)) {
+                       atomic_dec(&ctx->power_status);
+
+                       if (atomic_read(&ctx->power_status) == 0)
+                               anx7625_power_standby(ctx);
+               }
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "after set, power_state(%d).\n",
+                            atomic_read(&ctx->power_status));
+}
+
+static void anx7625_init_gpio(struct anx7625_data *platform)
+{
+       struct device *dev = &platform->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
+
+       /* Gpio for chip power enable */
+       platform->pdata.gpio_p_on =
+               devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+       /* Gpio for chip reset */
+       platform->pdata.gpio_reset =
+               devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+
+       if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
+               platform->pdata.low_power_mode = 1;
+               DRM_DEV_DEBUG_DRIVER(dev, "low power mode, pon %d, reset %d.\n",
+                                    desc_to_gpio(platform->pdata.gpio_p_on),
+                                    desc_to_gpio(platform->pdata.gpio_reset));
+       } else {
+               platform->pdata.low_power_mode = 0;
+               DRM_DEV_DEBUG_DRIVER(dev, "not low power mode.\n");
+       }
+}
+
+static void anx7625_stop_dp_work(struct anx7625_data *ctx)
+{
+       ctx->hpd_status = 0;
+       ctx->hpd_high_cnt = 0;
+       ctx->display_timing_valid = 0;
+
+       if (ctx->pdata.low_power_mode == 0)
+               anx7625_disable_pd_protocol(ctx);
+}
+
+static void anx7625_start_dp_work(struct anx7625_data *ctx)
+{
+       int ret;
+       struct device *dev = &ctx->client->dev;
+
+       if (ctx->hpd_high_cnt >= 2) {
+               DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
+               return;
+       }
+
+       ctx->hpd_high_cnt++;
+
+       /* Not support HDCP */
+       ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+
+       /* Try auth flag */
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+       /* Interrupt for DRM */
+       ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+       if (ret < 0)
+               return;
+
+       ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
+       if (ret < 0)
+               return;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
+}
+
+static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+{
+       return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+}
+
+static void anx7625_hpd_polling(struct anx7625_data *ctx)
+{
+       int ret, val;
+       struct device *dev = &ctx->client->dev;
+
+       if (atomic_read(&ctx->power_status) != 1) {
+               DRM_DEV_DEBUG_DRIVER(dev, "No need to poling HPD status.\n");
+               return;
+       }
+
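+       /* Poll HPD status every 5 ms, for at most 500 ms. */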
+       ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
+                                ctx, val,
+                                ((val & HPD_STATUS) || (val < 0)),
+                                5000,
+                                5000 * 100);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "HPD polling timeout!\n");
+       } else {
+               DRM_DEV_DEBUG_DRIVER(dev, "HPD raise up.\n");
+               anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+                                 INTR_ALERT_1, 0xFF);
+               anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                 INTERFACE_CHANGE_INT, 0);
+       }
+
+       anx7625_start_dp_work(ctx);
+}
+
+static void anx7625_disconnect_check(struct anx7625_data *ctx)
+{
+       if (atomic_read(&ctx->power_status) == 0)
+               anx7625_stop_dp_work(ctx);
+}
+
+static void anx7625_low_power_mode_check(struct anx7625_data *ctx,
+                                        int state)
+{
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "low power mode check, state(%d).\n", state);
+
+       if (ctx->pdata.low_power_mode) {
+               anx7625_chip_control(ctx, state);
+               if (state)
+                       anx7625_hpd_polling(ctx);
+               else
+                       anx7625_disconnect_check(ctx);
+       }
+}
+
+static void anx7625_remove_edid(struct anx7625_data *ctx)
+{
+       ctx->slimport_edid_p.edid_block_num = -1;
+}
+
+static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
+{
+       struct device *dev = &ctx->client->dev;
+
+       /* HPD changed */
+       DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n",
+                            (u32)on);
+
+       if (on == 0) {
+               DRM_DEV_DEBUG_DRIVER(dev, " HPD low\n");
+               anx7625_remove_edid(ctx);
+               anx7625_stop_dp_work(ctx);
+       } else {
+               DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n");
+               anx7625_start_dp_work(ctx);
+       }
+
+       ctx->hpd_status = 1;
+}
+
+static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
+{
+       int intr_vector, status;
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "power_status=%d\n",
+                            (u32)atomic_read(&ctx->power_status));
+
+       status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+                                  INTR_ALERT_1, 0xFF);
+       if (status < 0) {
+               DRM_DEV_ERROR(dev, "cannot clear alert reg.\n");
+               return status;
+       }
+
+       intr_vector = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+                                      INTERFACE_CHANGE_INT);
+       if (intr_vector < 0) {
+               DRM_DEV_ERROR(dev, "cannot access interrupt change reg.\n");
+               return intr_vector;
+       }
+       DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x44=%x\n", intr_vector);
+       status = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+                                  INTERFACE_CHANGE_INT, 0);
+       if (status < 0) {
+               DRM_DEV_ERROR(dev, "cannot clear interrupt change reg.\n");
+               return status;
+       }
+
+       if (!(intr_vector & HPD_STATUS_CHANGE))
+               return -ENOENT;
+
+       status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+                                 SYSTEM_STSTUS);
+       if (status < 0) {
+               DRM_DEV_ERROR(dev, "cannot clear interrupt status.\n");
+               return status;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status);
+       dp_hpd_change_handler(ctx, status & HPD_STATUS);
+
+       return 0;
+}
+
+static void anx7625_work_func(struct work_struct *work)
+{
+       int event;
+       struct anx7625_data *ctx = container_of(work,
+                                               struct anx7625_data, work);
+
+       mutex_lock(&ctx->lock);
+       event = anx7625_hpd_change_detect(ctx);
+       mutex_unlock(&ctx->lock);
+       if (event < 0)
+               return;
+
+       if (ctx->bridge_attached)
+               drm_helper_hpd_irq_event(ctx->bridge.dev);
+}
+
+static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
+{
+       struct anx7625_data *ctx = (struct anx7625_data *)data;
+
+       if (atomic_read(&ctx->power_status) != 1)
+               return IRQ_NONE;
+
+       queue_work(ctx->workqueue, &ctx->work);
+
+       return IRQ_HANDLED;
+}
+
+static int anx7625_parse_dt(struct device *dev,
+                           struct anx7625_platform_data *pdata)
+{
+       struct device_node *np = dev->of_node;
+       struct drm_panel *panel;
+       int ret;
+
+       pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
+       if (!pdata->mipi_host_node) {
+               DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
+               return -ENODEV;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n");
+
+       ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+       if (ret < 0) {
+               if (ret == -ENODEV)
+                       return 0;
+               return ret;
+       }
+       if (!panel)
+               return -ENODEV;
+
+       pdata->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+       if (IS_ERR(pdata->panel_bridge))
+               return PTR_ERR(pdata->panel_bridge);
+       DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n");
+
+       return 0;
+}
+
+static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct anx7625_data, bridge);
+}
+
+static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+       struct s_edid_data *p_edid = &ctx->slimport_edid_p;
+       int edid_num;
+       u8 *edid;
+
+       edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
+       if (!edid) {
+               DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
+               return NULL;
+       }
+
+       if (ctx->slimport_edid_p.edid_block_num > 0) {
+               memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
+                      FOUR_BLOCK_SIZE);
+               return (struct edid *)edid;
+       }
+
+       anx7625_low_power_mode_check(ctx, 1);
+       edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
+       anx7625_low_power_mode_check(ctx, 0);
+
+       if (edid_num < 1) {
+               DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
+               kfree(edid);
+               return NULL;
+       }
+
+       p_edid->edid_block_num = edid_num;
+
+       memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);
+       return (struct edid *)edid;
+}
+
+static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
+{
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n");
+
+       return connector_status_connected;
+}
+
+static int anx7625_attach_dsi(struct anx7625_data *ctx)
+{
+       struct mipi_dsi_device *dsi;
+       struct device *dev = &ctx->client->dev;
+       struct mipi_dsi_host *host;
+       const struct mipi_dsi_device_info info = {
+               .type = "anx7625",
+               .channel = 0,
+               .node = NULL,
+       };
+
+       DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
+
+       host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+       if (!host) {
+               DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+               return -EINVAL;
+       }
+
+       dsi = mipi_dsi_device_register_full(host, &info);
+       if (IS_ERR(dsi)) {
+               DRM_DEV_ERROR(dev, "fail to create dsi device.\n");
+               return -EINVAL;
+       }
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO   |
+               MIPI_DSI_MODE_VIDEO_SYNC_PULSE  |
+               MIPI_DSI_MODE_EOT_PACKET        |
+               MIPI_DSI_MODE_VIDEO_HSE;
+
+       if (mipi_dsi_attach(dsi) < 0) {
+               DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
+               mipi_dsi_device_unregister(dsi);
+               return -EINVAL;
+       }
+
+       ctx->dsi = dsi;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");
+
+       return 0;
+}
+
+static void anx7625_bridge_detach(struct drm_bridge *bridge)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+
+       if (ctx->dsi) {
+               mipi_dsi_detach(ctx->dsi);
+               mipi_dsi_device_unregister(ctx->dsi);
+       }
+}
+
+static int anx7625_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       int err;
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
+       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+               return -EINVAL;
+
+       if (!bridge->encoder) {
+               DRM_DEV_ERROR(dev, "Parent encoder object not found");
+               return -ENODEV;
+       }
+
+       err = anx7625_attach_dsi(ctx);
+       if (err) {
+               DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err);
+               return err;
+       }
+
+       if (ctx->pdata.panel_bridge) {
+               err = drm_bridge_attach(bridge->encoder,
+                                       ctx->pdata.panel_bridge,
+                                       &ctx->bridge, flags);
+               if (err) {
+                       DRM_DEV_ERROR(dev,
+                                     "Fail to attach panel bridge: %d\n", err);
+                       return err;
+               }
+       }
+
+       ctx->bridge_attached = 1;
+
+       return 0;
+}
+
+static enum drm_mode_status
+anx7625_bridge_mode_valid(struct drm_bridge *bridge,
+                         const struct drm_display_info *info,
+                         const struct drm_display_mode *mode)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");
+
+       /* Max 1200p at 5.4 GHz, one lane, pixel clock 300 MHz */
+       if (mode->clock > SUPPORT_PIXEL_CLOCK) {
+               DRM_DEV_DEBUG_DRIVER(dev,
+                                    "drm mode invalid, pixelclock too high.\n");
+               return MODE_CLOCK_HIGH;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm mode valid.\n");
+
+       return MODE_OK;
+}
+
+static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
+                                   const struct drm_display_mode *old_mode,
+                                   const struct drm_display_mode *mode)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
+
+       ctx->dt.pixelclock.min = mode->clock;
+       ctx->dt.hactive.min = mode->hdisplay;
+       ctx->dt.hsync_len.min = mode->hsync_end - mode->hsync_start;
+       ctx->dt.hfront_porch.min = mode->hsync_start - mode->hdisplay;
+       ctx->dt.hback_porch.min = mode->htotal - mode->hsync_end;
+       ctx->dt.vactive.min = mode->vdisplay;
+       ctx->dt.vsync_len.min = mode->vsync_end - mode->vsync_start;
+       ctx->dt.vfront_porch.min = mode->vsync_start - mode->vdisplay;
+       ctx->dt.vback_porch.min = mode->vtotal - mode->vsync_end;
+
+       ctx->display_timing_valid = 1;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "pixelclock(%d).\n", ctx->dt.pixelclock.min);
+       DRM_DEV_DEBUG_DRIVER(dev, "hactive(%d), hsync(%d), hfp(%d), hbp(%d)\n",
+                            ctx->dt.hactive.min,
+                            ctx->dt.hsync_len.min,
+                            ctx->dt.hfront_porch.min,
+                            ctx->dt.hback_porch.min);
+       DRM_DEV_DEBUG_DRIVER(dev, "vactive(%d), vsync(%d), vfp(%d), vbp(%d)\n",
+                            ctx->dt.vactive.min,
+                            ctx->dt.vsync_len.min,
+                            ctx->dt.vfront_porch.min,
+                            ctx->dt.vback_porch.min);
+       DRM_DEV_DEBUG_DRIVER(dev, "hdisplay(%d),hsync_start(%d).\n",
+                            mode->hdisplay,
+                            mode->hsync_start);
+       DRM_DEV_DEBUG_DRIVER(dev, "hsync_end(%d),htotal(%d).\n",
+                            mode->hsync_end,
+                            mode->htotal);
+       DRM_DEV_DEBUG_DRIVER(dev, "vdisplay(%d),vsync_start(%d).\n",
+                            mode->vdisplay,
+                            mode->vsync_start);
+       DRM_DEV_DEBUG_DRIVER(dev, "vsync_end(%d),vtotal(%d).\n",
+                            mode->vsync_end,
+                            mode->vtotal);
+}
+
+static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
+                                     const struct drm_display_mode *mode,
+                                     struct drm_display_mode *adj)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+       u32 hsync, hfp, hbp, hblanking;
+       u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
+       u32 vref, adj_clock;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");
+
+       hsync = mode->hsync_end - mode->hsync_start;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hbp = mode->htotal - mode->hsync_end;
+       hblanking = mode->htotal - mode->hdisplay;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "before mode fixup\n");
+       DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+                            hsync, hfp, hbp, adj->clock);
+       DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+                            adj->hsync_start, adj->hsync_end, adj->htotal);
+
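+       /*
+        * The adjustments below keep HFP, HBP and HSYNC even, presumably
+        * a constraint of the MIPI RX front end, while keeping the total
+        * horizontal blanking balanced.
+        */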
+       adj_hfp = hfp;
+       adj_hsync = hsync;
+       adj_hbp = hbp;
+       adj_hblanking = hblanking;
+
+       /* HFP needs to be even */
+       if (hfp & 0x1) {
+               adj_hfp += 1;
+               adj_hblanking += 1;
+       }
+
+       /* HBP needs to be even */
+       if (hbp & 0x1) {
+               adj_hbp -= 1;
+               adj_hblanking -= 1;
+       }
+
+       /* HSYNC needs to be even */
+       if (hsync & 0x1) {
+               if (adj_hblanking < hblanking)
+                       adj_hsync += 1;
+               else
+                       adj_hsync -= 1;
+       }
+
+       /*
+        * Once illegal timing is detected, fall back to the default HFP,
+        * HSYNC and HBP. This adjustment is made for the built-in eDP
+        * panel; for an external DP monitor, returning false may be
+        * needed instead.
+        */
+       if (hblanking < HBLANKING_MIN || (hfp < HP_MIN && hbp < HP_MIN)) {
+               adj_hsync = SYNC_LEN_DEF;
+               adj_hfp = HFP_HBP_DEF;
+               adj_hbp = HFP_HBP_DEF;
+               vref = adj->clock * 1000 / (adj->htotal * adj->vtotal);
+               if (hblanking < HBLANKING_MIN) {
+                       delta_adj = HBLANKING_MIN - hblanking;
+                       adj_clock = vref * delta_adj * adj->vtotal;
+                       adj->clock += DIV_ROUND_UP(adj_clock, 1000);
+               } else {
+                       delta_adj = hblanking - HBLANKING_MIN;
+                       adj_clock = vref * delta_adj * adj->vtotal;
+                       adj->clock -= DIV_ROUND_UP(adj_clock, 1000);
+               }
+
+               DRM_WARN("illegal hblanking timing, use default.\n");
+               DRM_WARN("hfp(%d), hbp(%d), hsync(%d).\n", hfp, hbp, hsync);
+       } else if (adj_hfp < HP_MIN) {
+               /* Adjust hfp if hfp less than HP_MIN */
+               delta_adj = HP_MIN - adj_hfp;
+               adj_hfp = HP_MIN;
+
+               /*
+                * Balance total HBlanking pixel, if HBP does not have enough
+                * space, adjust HSYNC length, otherwise adjust HBP
+                */
+               if ((adj_hbp - delta_adj) < HP_MIN)
+                       /* HBP not enough space */
+                       adj_hsync -= delta_adj;
+               else
+                       adj_hbp -= delta_adj;
+       } else if (adj_hbp < HP_MIN) {
+               delta_adj = HP_MIN - adj_hbp;
+               adj_hbp = HP_MIN;
+
+               /*
+                * Balance the total HBlanking pixels: if HFP does not have
+                * enough space, adjust the HSYNC length, otherwise adjust HFP
+                */
+               if ((adj_hfp - delta_adj) < HP_MIN)
+                       /* HFP not enough space */
+                       adj_hsync -= delta_adj;
+               else
+                       adj_hfp -= delta_adj;
+       }
+
+       DRM_DEV_DEBUG_DRIVER(dev, "after mode fixup\n");
+       DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+                            adj_hsync, adj_hfp, adj_hbp, adj->clock);
+
+       /* Reconstruct timing */
+       adj->hsync_start = adj->hdisplay + adj_hfp;
+       adj->hsync_end = adj->hsync_start + adj_hsync;
+       adj->htotal = adj->hsync_end + adj_hbp;
+       DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+                            adj->hsync_start, adj->hsync_end, adj->htotal);
+
+       return true;
+}
+
+static void anx7625_bridge_enable(struct drm_bridge *bridge)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
+
+       anx7625_low_power_mode_check(ctx, 1);
+
+       if (WARN_ON(!atomic_read(&ctx->power_status)))
+               return;
+
+       anx7625_dp_start(ctx);
+}
+
+static void anx7625_bridge_disable(struct drm_bridge *bridge)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       if (WARN_ON(!atomic_read(&ctx->power_status)))
+               return;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
+
+       anx7625_dp_stop(ctx);
+
+       anx7625_low_power_mode_check(ctx, 0);
+}
+
+static enum drm_connector_status
+anx7625_bridge_detect(struct drm_bridge *bridge)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+
+       return anx7625_sink_detect(ctx);
+}
+
+static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
+                                           struct drm_connector *connector)
+{
+       struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+       struct device *dev = &ctx->client->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
+
+       return anx7625_get_edid(ctx);
+}
+
+static const struct drm_bridge_funcs anx7625_bridge_funcs = {
+       .attach = anx7625_bridge_attach,
+       .detach = anx7625_bridge_detach,
+       .disable = anx7625_bridge_disable,
+       .mode_valid = anx7625_bridge_mode_valid,
+       .mode_set = anx7625_bridge_mode_set,
+       .mode_fixup = anx7625_bridge_mode_fixup,
+       .enable = anx7625_bridge_enable,
+       .detect = anx7625_bridge_detect,
+       .get_edid = anx7625_bridge_get_edid,
+};
+
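+/*
+ * The chip exposes its register pages at several fixed I2C addresses
+ * (TX_P0/P1/P2, RX_P0/P1/P2 and the TCPC interface), each mapped here
+ * as a dummy client. The header lists 8-bit addresses, hence the >> 1
+ * to obtain the 7-bit addresses the I2C core expects.
+ */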
+static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
+                                             struct i2c_client *client)
+{
+       ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
+                                                    TX_P0_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.tx_p0_client))
+               return -ENOMEM;
+
+       ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
+                                                    TX_P1_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.tx_p1_client))
+               goto free_tx_p0;
+
+       ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
+                                                    TX_P2_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.tx_p2_client))
+               goto free_tx_p1;
+
+       ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
+                                                    RX_P0_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.rx_p0_client))
+               goto free_tx_p2;
+
+       ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
+                                                    RX_P1_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.rx_p1_client))
+               goto free_rx_p0;
+
+       ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
+                                                    RX_P2_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.rx_p2_client))
+               goto free_rx_p1;
+
+       ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
+                                                   TCPC_INTERFACE_ADDR >> 1);
+       if (IS_ERR(ctx->i2c.tcpc_client))
+               goto free_rx_p2;
+
+       return 0;
+
+free_rx_p2:
+       i2c_unregister_device(ctx->i2c.rx_p2_client);
+free_rx_p1:
+       i2c_unregister_device(ctx->i2c.rx_p1_client);
+free_rx_p0:
+       i2c_unregister_device(ctx->i2c.rx_p0_client);
+free_tx_p2:
+       i2c_unregister_device(ctx->i2c.tx_p2_client);
+free_tx_p1:
+       i2c_unregister_device(ctx->i2c.tx_p1_client);
+free_tx_p0:
+       i2c_unregister_device(ctx->i2c.tx_p0_client);
+
+       return -ENOMEM;
+}
+
+static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
+{
+       i2c_unregister_device(ctx->i2c.tx_p0_client);
+       i2c_unregister_device(ctx->i2c.tx_p1_client);
+       i2c_unregister_device(ctx->i2c.tx_p2_client);
+       i2c_unregister_device(ctx->i2c.rx_p0_client);
+       i2c_unregister_device(ctx->i2c.rx_p1_client);
+       i2c_unregister_device(ctx->i2c.rx_p2_client);
+       i2c_unregister_device(ctx->i2c.tcpc_client);
+}
+
+static int anx7625_i2c_probe(struct i2c_client *client,
+                            const struct i2c_device_id *id)
+{
+       struct anx7625_data *platform;
+       struct anx7625_platform_data *pdata;
+       int ret = 0;
+       struct device *dev = &client->dev;
+
+       if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_SMBUS_I2C_BLOCK)) {
+               DRM_DEV_ERROR(dev, "anx7625's i2c bus doesn't support\n");
+               return -ENODEV;
+       }
+
+       platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+       if (!platform) {
+               DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
+               return -ENOMEM;
+       }
+
+       pdata = &platform->pdata;
+
+       ret = anx7625_parse_dt(dev, pdata);
+       if (ret) {
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+               goto free_platform;
+       }
+
+       platform->client = client;
+       i2c_set_clientdata(client, platform);
+
+       anx7625_init_gpio(platform);
+
+       atomic_set(&platform->power_status, 0);
+
+       mutex_init(&platform->lock);
+
+       platform->pdata.intp_irq = client->irq;
+       if (platform->pdata.intp_irq) {
+               INIT_WORK(&platform->work, anx7625_work_func);
+               platform->workqueue = create_workqueue("anx7625_work");
+               if (!platform->workqueue) {
+                       DRM_DEV_ERROR(dev, "fail to create work queue\n");
+                       ret = -ENOMEM;
+                       goto free_platform;
+               }
+
+               ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
+                                               NULL, anx7625_intr_hpd_isr,
+                                               IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
+                                               "anx7625-intp", platform);
+               if (ret) {
+                       DRM_DEV_ERROR(dev, "fail to request irq\n");
+                       goto free_wq;
+               }
+       }
+
+       if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
+               ret = -ENOMEM;
+               DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
+               goto free_wq;
+       }
+
+       if (platform->pdata.low_power_mode == 0) {
+               anx7625_disable_pd_protocol(platform);
+               atomic_set(&platform->power_status, 1);
+       }
+
+       /* Add work function */
+       if (platform->pdata.intp_irq)
+               queue_work(platform->workqueue, &platform->work);
+
+       platform->bridge.funcs = &anx7625_bridge_funcs;
+       platform->bridge.of_node = client->dev.of_node;
+       platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+       platform->bridge.type = DRM_MODE_CONNECTOR_eDP;
+       drm_bridge_add(&platform->bridge);
+
+       DRM_DEV_DEBUG_DRIVER(dev, "probe done\n");
+
+       return 0;
+
+free_wq:
+       if (platform->workqueue)
+               destroy_workqueue(platform->workqueue);
+
+free_platform:
+       kfree(platform);
+
+       return ret;
+}
+
+static int anx7625_i2c_remove(struct i2c_client *client)
+{
+       struct anx7625_data *platform = i2c_get_clientdata(client);
+
+       drm_bridge_remove(&platform->bridge);
+
+       if (platform->pdata.intp_irq)
+               destroy_workqueue(platform->workqueue);
+
+       anx7625_unregister_i2c_dummy_clients(platform);
+
+       kfree(platform);
+       return 0;
+}
+
+static const struct i2c_device_id anx7625_id[] = {
+       {"anx7625", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, anx7625_id);
+
+static const struct of_device_id anx_match_table[] = {
+       {.compatible = "analogix,anx7625",},
+       {},
+};
+
+static struct i2c_driver anx7625_driver = {
+       .driver = {
+               .name = "anx7625",
+               .of_match_table = anx_match_table,
+       },
+       .probe = anx7625_i2c_probe,
+       .remove = anx7625_i2c_remove,
+
+       .id_table = anx7625_id,
+};
+
+module_i2c_driver(anx7625_driver);
+
+MODULE_DESCRIPTION("MIPI2DP anx7625 driver");
+MODULE_AUTHOR("Xin Ji <xji@analogixsemi.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ANX7625_DRV_VERSION);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
new file mode 100644 (file)
index 0000000..193ad86
--- /dev/null
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+
+#ifndef __ANX7625_H__
+#define __ANX7625_H__
+
+#define ANX7625_DRV_VERSION "0.1.04"
+
+/* Loading OCM re-trying times */
+#define OCM_LOADING_TIME 10
+
+/*********  ANX7625 Register  **********/
+#define TX_P0_ADDR                             0x70
+#define TX_P1_ADDR                             0x7A
+#define TX_P2_ADDR                             0x72
+
+#define RX_P0_ADDR                             0x7e
+#define RX_P1_ADDR                             0x84
+#define RX_P2_ADDR                             0x54
+
+#define RSVD_00_ADDR                           0x00
+#define RSVD_D1_ADDR                           0xD1
+#define RSVD_60_ADDR                           0x60
+#define RSVD_39_ADDR                           0x39
+#define RSVD_7F_ADDR                           0x7F
+
+#define TCPC_INTERFACE_ADDR                    0x58
+
+/* Clock frequency in Hz */
+#define XTAL_FRQ        (27 * 1000000)
+
+#define  POST_DIVIDER_MIN      1
+#define  POST_DIVIDER_MAX      16
+#define  PLL_OUT_FREQ_MIN      520000000UL
+#define  PLL_OUT_FREQ_MAX      730000000UL
+#define  PLL_OUT_FREQ_ABS_MIN  300000000UL
+#define  PLL_OUT_FREQ_ABS_MAX  800000000UL
+#define  MAX_UNSIGNED_24BIT    16777215UL
+
+/***************************************************************/
+/* Register definition of device address 0x58 */
+
+#define PRODUCT_ID_L 0x02
+#define PRODUCT_ID_H 0x03
+
+#define INTR_ALERT_1  0xCC
+#define INTR_SOFTWARE_INT BIT(3)
+#define INTR_RECEIVED_MSG BIT(5)
+
+#define SYSTEM_STSTUS 0x45
+#define INTERFACE_CHANGE_INT 0x44
+#define HPD_STATUS_CHANGE 0x80
+#define HPD_STATUS 0x80
+
+/******** END of I2C Address 0x58 ********/
+
+/***************************************************************/
+/* Register definition of device address 0x70 */
+#define  I2C_ADDR_70_DPTX              0x70
+
+#define SP_TX_LINK_BW_SET_REG 0xA0
+#define SP_TX_LANE_COUNT_SET_REG 0xA1
+
+#define M_VID_0 0xC0
+#define M_VID_1 0xC1
+#define M_VID_2 0xC2
+#define N_VID_0 0xC3
+#define N_VID_1 0xC4
+#define N_VID_2 0xC5
+
+/***************************************************************/
+/* Register definition of device address 0x72 */
+#define AUX_RST        0x04
+#define RST_CTRL2 0x07
+
+#define SP_TX_TOTAL_LINE_STA_L 0x24
+#define SP_TX_TOTAL_LINE_STA_H 0x25
+#define SP_TX_ACT_LINE_STA_L 0x26
+#define SP_TX_ACT_LINE_STA_H 0x27
+#define SP_TX_V_F_PORCH_STA 0x28
+#define SP_TX_V_SYNC_STA 0x29
+#define SP_TX_V_B_PORCH_STA 0x2A
+#define SP_TX_TOTAL_PIXEL_STA_L 0x2B
+#define SP_TX_TOTAL_PIXEL_STA_H 0x2C
+#define SP_TX_ACT_PIXEL_STA_L 0x2D
+#define SP_TX_ACT_PIXEL_STA_H 0x2E
+#define SP_TX_H_F_PORCH_STA_L 0x2F
+#define SP_TX_H_F_PORCH_STA_H 0x30
+#define SP_TX_H_SYNC_STA_L 0x31
+#define SP_TX_H_SYNC_STA_H 0x32
+#define SP_TX_H_B_PORCH_STA_L 0x33
+#define SP_TX_H_B_PORCH_STA_H 0x34
+
+#define SP_TX_VID_CTRL 0x84
+#define SP_TX_BPC_MASK 0xE0
+#define SP_TX_BPC_6    0x00
+#define SP_TX_BPC_8    0x20
+#define SP_TX_BPC_10   0x40
+#define SP_TX_BPC_12   0x60
+
+#define VIDEO_BIT_MATRIX_12 0x4c
+
+#define AUDIO_CHANNEL_STATUS_1 0xd0
+#define AUDIO_CHANNEL_STATUS_2 0xd1
+#define AUDIO_CHANNEL_STATUS_3 0xd2
+#define AUDIO_CHANNEL_STATUS_4 0xd3
+#define AUDIO_CHANNEL_STATUS_5 0xd4
+#define AUDIO_CHANNEL_STATUS_6 0xd5
+#define TDM_SLAVE_MODE 0x10
+#define I2S_SLAVE_MODE 0x08
+
+#define AUDIO_CONTROL_REGISTER 0xe6
+#define TDM_TIMING_MODE 0x08
+
+#define I2C_ADDR_72_DPTX              0x72
+
+#define HP_MIN                 8
+#define HBLANKING_MIN          80
+#define SYNC_LEN_DEF           32
+#define HFP_HBP_DEF            ((HBLANKING_MIN - SYNC_LEN_DEF) / 2)
+#define VIDEO_CONTROL_0        0x08
+
+#define  ACTIVE_LINES_L         0x14
+#define  ACTIVE_LINES_H         0x15  /* Bit[7:6] are reserved */
+#define  VERTICAL_FRONT_PORCH   0x16
+#define  VERTICAL_SYNC_WIDTH    0x17
+#define  VERTICAL_BACK_PORCH    0x18
+
+#define  HORIZONTAL_TOTAL_PIXELS_L    0x19
+#define  HORIZONTAL_TOTAL_PIXELS_H    0x1A  /* Bit[7:6] are reserved */
+#define  HORIZONTAL_ACTIVE_PIXELS_L   0x1B
+#define  HORIZONTAL_ACTIVE_PIXELS_H   0x1C  /* Bit[7:6] are reserved */
+#define  HORIZONTAL_FRONT_PORCH_L     0x1D
+#define  HORIZONTAL_FRONT_PORCH_H     0x1E  /* Bit[7:4] are reserved */
+#define  HORIZONTAL_SYNC_WIDTH_L      0x1F
+#define  HORIZONTAL_SYNC_WIDTH_H      0x20  /* Bit[7:4] are reserved */
+#define  HORIZONTAL_BACK_PORCH_L      0x21
+#define  HORIZONTAL_BACK_PORCH_H      0x22  /* Bit[7:4] are reserved */
+
+/******** END of I2C Address 0x72 *********/
+/***************************************************************/
+/* Register definition of device address 0x7e */
+
+#define  I2C_ADDR_7E_FLASH_CONTROLLER  0x7E
+
+#define FLASH_LOAD_STA 0x05
+#define FLASH_LOAD_STA_CHK     BIT(7)
+
+#define  XTAL_FRQ_SEL    0x3F
+/* bit field positions */
+#define  XTAL_FRQ_SEL_POS    5
+/* bit field values */
+#define  XTAL_FRQ_19M2   (0 << XTAL_FRQ_SEL_POS)
+#define  XTAL_FRQ_27M    (4 << XTAL_FRQ_SEL_POS)
+
+#define  R_DSC_CTRL_0    0x40
+#define  READ_STATUS_EN  7
+#define  CLK_1MEG_RB     6  /* 1MHz clock reset; 0=reset, 1=reset release */
+#define  DSC_BIST_DONE   1  /* Bit[5:1]: 1=DSC MBIST pass */
+#define  DSC_EN          0x01  /* 1=DSC enabled, 0=DSC disabled */
+
+#define OCM_FW_VERSION   0x31
+#define OCM_FW_REVERSION 0x32
+
+#define AP_AUX_ADDR_7_0   0x11
+#define AP_AUX_ADDR_15_8  0x12
+#define AP_AUX_ADDR_19_16 0x13
+
+/* Bit[0:3] AUX status, bit 4 op_en, bit 5 address only */
+#define AP_AUX_CTRL_STATUS 0x14
+#define AP_AUX_CTRL_OP_EN 0x10
+#define AP_AUX_CTRL_ADDRONLY 0x20
+
+#define AP_AUX_BUFF_START 0x15
+#define PIXEL_CLOCK_L 0x25
+#define PIXEL_CLOCK_H 0x26
+
+#define AP_AUX_COMMAND 0x27  /* com+len */
+/* Bit 0&1: 3D video structure */
+/* 0x01: frame packing,  0x02:Line alternative, 0x03:Side-by-side(full) */
+#define AP_AV_STATUS 0x28
+#define AP_VIDEO_CHG  BIT(2)
+#define AP_AUDIO_CHG  BIT(3)
+#define AP_MIPI_MUTE  BIT(4) /* 1: MIPI input mute, 0: unmute */
+#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input enabled, 0: no RX input */
+#define AP_DISABLE_PD BIT(6)
+#define AP_DISABLE_DISPLAY BIT(7)
+/***************************************************************/
+/* Register definition of device address 0x84 */
+#define  MIPI_PHY_CONTROL_3            0x03
+#define  MIPI_HS_PWD_CLK               7
+#define  MIPI_HS_RT_CLK                6
+#define  MIPI_PD_CLK                   5
+#define  MIPI_CLK_RT_MANUAL_PD_EN      4
+#define  MIPI_CLK_HS_MANUAL_PD_EN      3
+#define  MIPI_CLK_DET_DET_BYPASS       2
+#define  MIPI_CLK_MISS_CTRL            1
+#define  MIPI_PD_LPTX_CH_MANUAL_PD_EN  0
+
+#define  MIPI_LANE_CTRL_0              0x05
+#define  MIPI_TIME_HS_PRPR             0x08
+
+/*
+ * After the MIPI RX protocol layer has received video frames, the
+ * protocol layer starts to reconstruct the video stream from the PHY.
+ */
+#define  MIPI_VIDEO_STABLE_CNT           0x0A
+
+#define  MIPI_LANE_CTRL_10               0x0F
+#define  MIPI_DIGITAL_ADJ_1   0x1B
+
+#define  MIPI_PLL_M_NUM_23_16   0x1E
+#define  MIPI_PLL_M_NUM_15_8    0x1F
+#define  MIPI_PLL_M_NUM_7_0     0x20
+#define  MIPI_PLL_N_NUM_23_16   0x21
+#define  MIPI_PLL_N_NUM_15_8    0x22
+#define  MIPI_PLL_N_NUM_7_0     0x23
+
+#define  MIPI_DIGITAL_PLL_6     0x2A
+/* Bit[7:6]: VCO band control, only effective */
+#define  MIPI_M_NUM_READY        0x10
+#define  MIPI_N_NUM_READY        0x08
+#define  STABLE_INTEGER_CNT_EN   0x04
+#define  MIPI_PLL_TEST_BIT       0
+/* Bit[1:0]: test point output select - */
+/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */
+
+#define  MIPI_DIGITAL_PLL_7      0x2B
+#define  MIPI_PLL_FORCE_N_EN     7
+#define  MIPI_PLL_FORCE_BAND_EN  6
+
+#define  MIPI_PLL_VCO_TUNE_REG   4
+/* Bit[5:4]: VCO metal capacitance - */
+/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */
+#define  MIPI_PLL_VCO_TUNE_REG_VAL   0x30
+
+#define  MIPI_PLL_PLL_LDO_BIT    2
+/* Bit[3:2]: vco_v2i power - */
+/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */
+#define  MIPI_PLL_RESET_N        0x02
+#define  MIPI_FRQ_FORCE_NDET     0
+
+#define  MIPI_ALERT_CLR_0        0x2D
+#define  HS_link_error_clear     7
+/* This bit itself is S/C, and it clears 0x84:0x31[7] */
+
+#define  MIPI_ALERT_OUT_0        0x31
+#define  check_sum_err_hs_sync   7
+/* This bit is cleared by 0x84:0x2D[7] */
+
+#define  MIPI_DIGITAL_PLL_8    0x33
+#define  MIPI_POST_DIV_VAL     4
+/* N means divided by (n+1), n = 0~15 */
+#define  MIPI_EN_LOCK_FRZ      3
+#define  MIPI_FRQ_COUNTER_RST  2
+#define  MIPI_FRQ_SET_REG_8    1
+/* Bit 0 is reserved */
+
+#define  MIPI_DIGITAL_PLL_9    0x34
+
+#define  MIPI_DIGITAL_PLL_16   0x3B
+#define  MIPI_FRQ_FREEZE_NDET          7
+#define  MIPI_FRQ_REG_SET_ENABLE       6
+#define  MIPI_REG_FORCE_SEL_EN         5
+#define  MIPI_REG_SEL_DIV_REG          4
+#define  MIPI_REG_FORCE_PRE_DIV_EN     3
+/* Bit 2 is reserved */
+#define  MIPI_FREF_D_IND               1
+#define  REF_CLK_27000KHZ    1
+#define  REF_CLK_19200KHZ    0
+#define  MIPI_REG_PLL_PLL_TEST_ENABLE  0
+
+#define  MIPI_DIGITAL_PLL_18  0x3D
+#define  FRQ_COUNT_RB_SEL       7
+#define  REG_FORCE_POST_DIV_EN  6
+#define  MIPI_DPI_SELECT        5
+#define  SELECT_DSI  1
+#define  SELECT_DPI  0
+#define  REG_BAUD_DIV_RATIO     0
+
+#define  H_BLANK_L            0x3E
+/* For DSC only */
+#define  H_BLANK_H            0x3F
+/* For DSC only; note: bit[7:6] are reserved */
+#define  MIPI_SWAP  0x4A
+#define  MIPI_SWAP_CH0    7
+#define  MIPI_SWAP_CH1    6
+#define  MIPI_SWAP_CH2    5
+#define  MIPI_SWAP_CH3    4
+#define  MIPI_SWAP_CLK    3
+/* Bit[2:0] are reserved */
+
+/******** END of I2C Address 0x84 *********/
+
+/* DPCD regs */
+#define DPCD_DPCD_REV                  0x00
+#define DPCD_MAX_LINK_RATE             0x01
+#define DPCD_MAX_LANE_COUNT            0x02
+
+/*********  ANX7625 Register End  **********/
+
+/***************** Display *****************/
+enum audio_fs {
+       AUDIO_FS_441K  = 0x00,
+       AUDIO_FS_48K   = 0x02,
+       AUDIO_FS_32K   = 0x03,
+       AUDIO_FS_882K  = 0x08,
+       AUDIO_FS_96K   = 0x0a,
+       AUDIO_FS_1764K = 0x0c,
+       AUDIO_FS_192K  = 0x0e
+};
+
+enum audio_wd_len {
+       AUDIO_W_LEN_16_20MAX = 0x02,
+       AUDIO_W_LEN_18_20MAX = 0x04,
+       AUDIO_W_LEN_17_20MAX = 0x0c,
+       AUDIO_W_LEN_19_20MAX = 0x08,
+       AUDIO_W_LEN_20_20MAX = 0x0a,
+       AUDIO_W_LEN_20_24MAX = 0x03,
+       AUDIO_W_LEN_22_24MAX = 0x05,
+       AUDIO_W_LEN_21_24MAX = 0x0d,
+       AUDIO_W_LEN_23_24MAX = 0x09,
+       AUDIO_W_LEN_24_24MAX = 0x0b
+};
+
+#define I2S_CH_2       0x01
+#define TDM_CH_4       0x03
+#define TDM_CH_6       0x05
+#define TDM_CH_8       0x07
+
+#define MAX_DPCD_BUFFER_SIZE   16
+
+#define ONE_BLOCK_SIZE      128
+#define FOUR_BLOCK_SIZE     (128 * 4)
+
+#define MAX_EDID_BLOCK 3
+#define EDID_TRY_CNT   3
+#define SUPPORT_PIXEL_CLOCK    300000
+
+struct s_edid_data {
+       int edid_block_num;
+       u8 edid_raw_data[FOUR_BLOCK_SIZE];
+};
+
+/***************** Display End *****************/
+
+struct anx7625_platform_data {
+       struct gpio_desc *gpio_p_on;
+       struct gpio_desc *gpio_reset;
+       struct drm_bridge *panel_bridge;
+       int intp_irq;
+       u32 low_power_mode;
+       struct device_node *mipi_host_node;
+};
+
+struct anx7625_i2c_client {
+       struct i2c_client *tx_p0_client;
+       struct i2c_client *tx_p1_client;
+       struct i2c_client *tx_p2_client;
+       struct i2c_client *rx_p0_client;
+       struct i2c_client *rx_p1_client;
+       struct i2c_client *rx_p2_client;
+       struct i2c_client *tcpc_client;
+};
+
+struct anx7625_data {
+       struct anx7625_platform_data pdata;
+       atomic_t power_status;
+       int hpd_status;
+       int hpd_high_cnt;
+       /* Lock for work queue */
+       struct mutex lock;
+       struct i2c_client *client;
+       struct anx7625_i2c_client i2c;
+       struct i2c_client *last_client;
+       struct s_edid_data slimport_edid_p;
+       struct work_struct work;
+       struct workqueue_struct *workqueue;
+       char edid_block;
+       struct display_timing dt;
+       u8 display_timing_valid;
+       struct drm_bridge bridge;
+       u8 bridge_attached;
+       struct mipi_dsi_device *dsi;
+};
+
+#endif  /* __ANX7625_H__ */
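
The AP_AUX_* registers above expose the chip's DisplayPort AUX channel through plain I2C register accesses. As a hedged illustration only (this is not code from the driver), a minimal AUX read using these definitions might look as follows; the real driver's helpers differ, and the AP_AUX_COMMAND encoding ("com+len") is device-specific, so the command byte here is a placeholder.

/*
 * Hypothetical sketch, assuming <linux/i2c.h> and <linux/delay.h>;
 * error handling omitted for brevity.
 */
static int sketch_aux_read_byte(struct i2c_client *client, u32 addr)
{
	/* Program the 20-bit AUX address, low byte first. */
	i2c_smbus_write_byte_data(client, AP_AUX_ADDR_7_0, addr & 0xff);
	i2c_smbus_write_byte_data(client, AP_AUX_ADDR_15_8, (addr >> 8) & 0xff);
	i2c_smbus_write_byte_data(client, AP_AUX_ADDR_19_16, (addr >> 16) & 0x0f);

	/* "com+len" command byte; 0x09 is a placeholder, not a known value. */
	i2c_smbus_write_byte_data(client, AP_AUX_COMMAND, 0x09);

	/* Kick off the transaction; op_en is assumed to self-clear on completion. */
	i2c_smbus_write_byte_data(client, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
	while (i2c_smbus_read_byte_data(client, AP_AUX_CTRL_STATUS) &
	       AP_AUX_CTRL_OP_EN)
		usleep_range(100, 200);

	/* The first returned data byte lands at the start of the AUX buffer. */
	return i2c_smbus_read_byte_data(client, AP_AUX_BUFF_START);
}
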
index 9fef6413741dcbd674d78cae5d5478aa0bb63ce0..feb04f127b550e836043a0344e2fe7061b510027 100644 (file)
@@ -170,7 +170,7 @@ static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
        return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
 }
 
-static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+static const struct hdmi_codec_ops dw_hdmi_i2s_ops = {
        .hw_params      = dw_hdmi_i2s_hw_params,
        .audio_startup  = dw_hdmi_i2s_audio_startup,
        .audio_shutdown = dw_hdmi_i2s_audio_shutdown,
index d89394bc5aa4d3a736320116007133008bd3a2c1..c1e35bdf9232a7e573a350cac928e0f2b1f5665e 100644 (file)
@@ -153,9 +153,10 @@ static const char * const tc358764_supplies[] = {
 struct tc358764 {
        struct device *dev;
        struct drm_bridge bridge;
+       struct drm_connector connector;
        struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
        struct gpio_desc *gpio_reset;
-       struct drm_bridge *panel_bridge;
+       struct drm_panel *panel;
        int error;
 };
 
@@ -209,6 +210,12 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
        return container_of(bridge, struct tc358764, bridge);
 }
 
+static inline
+struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
+{
+       return container_of(connector, struct tc358764, connector);
+}
+
 static int tc358764_init(struct tc358764 *ctx)
 {
        u32 v = 0;
@@ -271,11 +278,43 @@ static void tc358764_reset(struct tc358764 *ctx)
        usleep_range(1000, 2000);
 }
 
+static int tc358764_get_modes(struct drm_connector *connector)
+{
+       struct tc358764 *ctx = connector_to_tc358764(connector);
+
+       return drm_panel_get_modes(ctx->panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
+       .get_modes = tc358764_get_modes,
+};
+
+static const struct drm_connector_funcs tc358764_connector_funcs = {
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void tc358764_disable(struct drm_bridge *bridge)
+{
+       struct tc358764 *ctx = bridge_to_tc358764(bridge);
+       int ret = drm_panel_disable(ctx->panel);
+
+       if (ret < 0)
+               dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
+}
+
 static void tc358764_post_disable(struct drm_bridge *bridge)
 {
        struct tc358764 *ctx = bridge_to_tc358764(bridge);
        int ret;
 
+       ret = drm_panel_unprepare(ctx->panel);
+       if (ret < 0)
+               dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
        tc358764_reset(ctx);
        usleep_range(10000, 15000);
        ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -296,28 +335,71 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
        ret = tc358764_init(ctx);
        if (ret < 0)
                dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+       ret = drm_panel_prepare(ctx->panel);
+       if (ret < 0)
+               dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
+}
+
+static void tc358764_enable(struct drm_bridge *bridge)
+{
+       struct tc358764 *ctx = bridge_to_tc358764(bridge);
+       int ret = drm_panel_enable(ctx->panel);
+
+       if (ret < 0)
+               dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
 }
 
 static int tc358764_attach(struct drm_bridge *bridge,
                           enum drm_bridge_attach_flags flags)
+{
+       struct tc358764 *ctx = bridge_to_tc358764(bridge);
+       struct drm_device *drm = bridge->dev;
+       int ret;
+
+       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+               DRM_ERROR("Fix bridge driver to make connector optional!");
+               return -EINVAL;
+       }
+
+       ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+       ret = drm_connector_init(drm, &ctx->connector,
+                                &tc358764_connector_funcs,
+                                DRM_MODE_CONNECTOR_LVDS);
+       if (ret) {
+               DRM_ERROR("Failed to initialize connector\n");
+               return ret;
+       }
+
+       drm_connector_helper_add(&ctx->connector,
+                                &tc358764_connector_helper_funcs);
+       drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+       ctx->connector.funcs->reset(&ctx->connector);
+       drm_connector_register(&ctx->connector);
+
+       return 0;
+}
+
+static void tc358764_detach(struct drm_bridge *bridge)
 {
        struct tc358764 *ctx = bridge_to_tc358764(bridge);
 
-       return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
-                                bridge, flags);
+       drm_connector_unregister(&ctx->connector);
+       ctx->panel = NULL;
+       drm_connector_put(&ctx->connector);
 }
 
 static const struct drm_bridge_funcs tc358764_bridge_funcs = {
+       .disable = tc358764_disable,
        .post_disable = tc358764_post_disable,
+       .enable = tc358764_enable,
        .pre_enable = tc358764_pre_enable,
        .attach = tc358764_attach,
+       .detach = tc358764_detach,
 };
 
 static int tc358764_parse_dt(struct tc358764 *ctx)
 {
-       struct drm_bridge *panel_bridge;
        struct device *dev = ctx->dev;
-       struct drm_panel *panel;
        int ret;
 
        ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -326,16 +408,12 @@ static int tc358764_parse_dt(struct tc358764 *ctx)
                return PTR_ERR(ctx->gpio_reset);
        }
 
-       ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
-       if (ret)
-               return ret;
-
-       panel_bridge = devm_drm_panel_bridge_add(dev, panel);
-       if (IS_ERR(panel_bridge))
-               return PTR_ERR(panel_bridge);
+       ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
+                                         NULL);
+       if (ret && ret != -EPROBE_DEFER)
+               dev_err(dev, "cannot find panel (%d)\n", ret);
 
-       ctx->panel_bridge = panel_bridge;
-       return 0;
+       return ret;
 }
 
 static int tc358764_configure_regulators(struct tc358764 *ctx)
@@ -381,7 +459,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
                return ret;
 
        ctx->bridge.funcs = &tc358764_bridge_funcs;
-       ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
        ctx->bridge.of_node = dev->of_node;
 
        drm_bridge_add(&ctx->bridge);
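
With the panel-bridge indirection removed above, the driver drives the panel directly, so the drm_panel calls pair up across the bridge hooks. A condensed view of the resulting ordering (not literal driver code):

/*
 * Power-up:                       Power-down:
 *   pre_enable:                     disable:
 *     regulators + reset + init       drm_panel_disable()
 *     drm_panel_prepare()           post_disable:
 *   enable:                           drm_panel_unprepare()
 *     drm_panel_enable()              reset + regulators off
 */
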
index ecdf9b01340f5e6413f10c26054a722560ea72d2..6ca1debd0f88d7b3946a184f0a0fb6d0b41ea5d2 100644 (file)
 #define SN_NUM_GPIOS                   4
 #define SN_GPIO_PHYSICAL_OFFSET                1
 
+#define SN_LINK_TRAINING_TRIES         10
+
 /**
  * struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
  * @dev:          Pointer to our device.
@@ -673,6 +675,7 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
 {
        unsigned int val;
        int ret;
+       int i;
 
        /* set dp clk frequency value */
        regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
@@ -689,19 +692,34 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
                goto exit;
        }
 
-       /* Semi auto link training mode */
-       regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
-       ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
-                                      val == ML_TX_MAIN_LINK_OFF ||
-                                      val == ML_TX_NORMAL_MODE, 1000,
-                                      500 * 1000);
-       if (ret) {
-               *last_err_str = "Training complete polling failed";
-       } else if (val == ML_TX_MAIN_LINK_OFF) {
-               *last_err_str = "Link training failed, link is off";
-               ret = -EIO;
+       /*
+        * We'll try to link train several times.  As part of link training
+        * the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER.  If
+        * the panel isn't quite ready, it might respond with a NAK here,
+        * which means we need to try again.
+        */
+       for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) {
+               /* Semi auto link training mode */
+               regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+               ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+                                              val == ML_TX_MAIN_LINK_OFF ||
+                                              val == ML_TX_NORMAL_MODE, 1000,
+                                              500 * 1000);
+               if (ret) {
+                       *last_err_str = "Training complete polling failed";
+               } else if (val == ML_TX_MAIN_LINK_OFF) {
+                       *last_err_str = "Link training failed, link is off";
+                       ret = -EIO;
+                       continue;
+               }
+
+               break;
        }
 
+       /* If we saw quite a few retries, add a note about it */
+       if (!ret && i > SN_LINK_TRAINING_TRIES / 2)
+               DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i);
+
 exit:
        /* Disable the PLL if we failed */
        if (ret)
@@ -816,8 +834,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
 {
        struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
 
-       if (pdata->refclk)
-               clk_disable_unprepare(pdata->refclk);
+       clk_disable_unprepare(pdata->refclk);
 
        pm_runtime_put_sync(pdata->dev);
 }
index 58527f151984c84100421e149e074921d3e35d46..b2d20eb6c807a84d75591c242ddf92ec3a5b46ff 100644 (file)
@@ -281,6 +281,10 @@ EXPORT_SYMBOL(__drm_atomic_state_free);
  * needed. It will also grab the relevant CRTC lock to make sure that the state
  * is consistent.
  *
+ * WARNING: Drivers may only add new CRTC states to a @state if
+ * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
+ * not created by userspace through an IOCTL call.
+ *
  * Returns:
  *
  * Either the allocated state or the error code encoded into the pointer. When
@@ -1262,10 +1266,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
        struct drm_crtc_state *new_crtc_state;
        struct drm_connector *conn;
        struct drm_connector_state *conn_state;
+       unsigned requested_crtc = 0;
+       unsigned affected_crtc = 0;
        int i, ret = 0;
 
        DRM_DEBUG_ATOMIC("checking %p\n", state);
 
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+               requested_crtc |= drm_crtc_mask(crtc);
+
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
                if (ret) {
@@ -1313,6 +1322,26 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
                }
        }
 
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+               affected_crtc |= drm_crtc_mask(crtc);
+
+       /*
+        * For commits that allow modesets drivers can add other CRTCs to the
+        * atomic commit, e.g. when they need to reallocate global resources.
+        * This can cause spurious EBUSY, which robs compositors of a very
+        * effective sanity check for their drawing loop. Therefore only allow
+        * drivers to add unrelated CRTC states for modeset commits.
+        *
+        * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
+        * so compositors know what's going on.
+        */
+       if (affected_crtc != requested_crtc) {
+               DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
+                                requested_crtc, affected_crtc);
+               WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
+                    requested_crtc, affected_crtc);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1613,11 +1642,11 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
 * to dmesg in case of error irqs.  (Hint, you probably want to
  * ratelimit this!)
  *
- * The caller must drm_modeset_lock_all(), or if this is called
- * from error irq handler, it should not be enabled by default.
- * (Ie. if you are debugging errors you might not care that this
- * is racey.  But calling this without all modeset locks held is
- * not inherently safe.)
+ * The caller must wrap this in drm_modeset_lock_all_ctx() and
+ * drm_modeset_drop_locks(). If this is called from an error irq handler, it
+ * should not be enabled by default - if you are debugging errors you might
+ * not care that this is racey, but calling this without all modeset locks held
+ * is inherently unsafe.
  */
 void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
 {
index f9170b4b22e7e8991f506f998bd24924212900d4..a7bcb4b4586cb74a3dcdfdb2a135ffe2f5e5c79c 100644 (file)
@@ -1093,7 +1093,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                if (new_crtc_state->enable && funcs->prepare)
                        funcs->prepare(crtc);
                else if (funcs->atomic_disable)
-                       funcs->atomic_disable(crtc, old_crtc_state);
+                       funcs->atomic_disable(crtc, old_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
                else if (funcs->dpms)
@@ -1358,7 +1358,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
                                         crtc->base.id, crtc->name);
                        if (funcs->atomic_enable)
-                               funcs->atomic_enable(crtc, old_crtc_state);
+                               funcs->atomic_enable(crtc, old_state);
                        else if (funcs->commit)
                                funcs->commit(crtc);
                }
@@ -1736,8 +1736,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
         * overridden by a previous synchronous update's state.
         */
        if (old_plane_state->commit &&
-           !try_wait_for_completion(&old_plane_state->commit->hw_done))
+           !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
+                       plane->base.id, plane->name);
                return -EBUSY;
+       }
 
        return funcs->atomic_async_check(plane, new_plane_state);
 }
@@ -1955,6 +1958,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
                         * commit with nonblocking ones. */
                        if (!completed && nonblock) {
                                spin_unlock(&crtc->commit_lock);
+                               DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
+                                       crtc->base.id, crtc->name);
+
                                return -EBUSY;
                        }
                } else if (i == 1) {
@@ -2129,8 +2135,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
                /* Userspace is not allowed to get ahead of the previous
                 * commit with nonblocking ones. */
                if (nonblock && old_conn_state->commit &&
-                   !try_wait_for_completion(&old_conn_state->commit->flip_done))
+                   !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
+                       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
+                               conn->base.id, conn->name);
+
                        return -EBUSY;
+               }
 
                /* Always track connectors explicitly for e.g. link retraining. */
                commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
@@ -2144,8 +2154,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
                /* Userspace is not allowed to get ahead of the previous
                 * commit with nonblocking ones. */
                if (nonblock && old_plane_state->commit &&
-                   !try_wait_for_completion(&old_plane_state->commit->flip_done))
+                   !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
+                       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
+                               plane->base.id, plane->name);
+
                        return -EBUSY;
+               }
 
                /* Always track planes explicitly for async pageflip support. */
                commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
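
The two atomic_enable/atomic_disable hunks above follow the subsystem-wide change of passing the full &drm_atomic_state to these CRTC helper callbacks. A minimal sketch of a driver callback under the new signature (hypothetical driver, body elided); the old and new CRTC states are looked up from the atomic state when needed:

static void sketch_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/*
	 * Program the hardware from new_crtc_state, comparing against
	 * old_crtc_state where needed (hypothetical driver body).
	 */
}
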
index 717c4e7271b0422692c485d8c8cb426de0bc006f..1913d8b4e16a8683faa8168ed8edcaf17f9e09b3 100644 (file)
@@ -960,6 +960,11 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
  *     drm_connector_update_edid_property(), usually after having parsed
  *     the EDID using drm_add_edid_modes(). Userspace cannot change this
  *     property.
+ *
+ *     User-space should not parse the EDID to obtain information exposed via
+ *     other KMS properties (because the kernel might apply limits, quirks or
+ *     fixups to the EDID). For instance, user-space should not try to parse
+ *     mode lists from the EDID.
  * DPMS:
  *     Legacy property for setting the power state of the connector. For atomic
  *     drivers this is only provided for backwards compatibility with existing
index 2510717d5a08fa148af971cd23d66c6baa4def80..e25181bf2c480c940e53921ebd8cf94b10dae7ad 100644 (file)
@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
 
        mutex_lock(&aux_idr_mutex);
        aux_dev = idr_find(&aux_idr, index);
-       if (!kref_get_unless_zero(&aux_dev->refcount))
+       if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
                aux_dev = NULL;
        mutex_unlock(&aux_idr_mutex);
 
index e875425336406fdd8d483257fde972ab03d5dc25..153b6065ba2927f324b5f99a5c78f6daed0b6287 100644 (file)
@@ -3686,9 +3686,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                WARN_ON(mgr->mst_primary);
 
                /* get dpcd info */
-               ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
-               if (ret != DP_RECEIVER_CAP_SIZE) {
-                       DRM_DEBUG_KMS("failed to read DPCD\n");
+               ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
+               if (ret < 0) {
+                       drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
+                                   mgr->aux->name, ret);
                        goto out_unlock;
                }
 
index 631125b46e04c6df67a5a14a13cc4b98b6061137..c7363af731b42dfca3e9f48e400d393987fcdd15 100644 (file)
@@ -1844,7 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector,
        if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
                return;
 
-       drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
+       drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
        for (i = 0; i < num_blocks; i++) {
                u8 *block = edid + i * EDID_LENGTH;
                char prefix[20];
@@ -1856,7 +1856,7 @@ static void connector_bad_edid(struct drm_connector *connector,
                else
                        sprintf(prefix, "\t[%02x] GOOD ", i);
 
-               print_hex_dump(KERN_WARNING,
+               print_hex_dump(KERN_DEBUG,
                               prefix, DUMP_PREFIX_NONE, 16, 1,
                               block, EDID_LENGTH, false);
        }
index 1543d9d109705cc27085f09ecbec20ce0d830c1e..92e0db30fdf71ca4aa6be0a4a61b1cfa1954b4fd 100644 (file)
@@ -281,18 +281,12 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
 #ifdef CONFIG_MAGIC_SYSRQ
-/*
- * restore fbcon display for all kms driver's using this helper, used for sysrq
- * and panic handling.
- */
-static bool drm_fb_helper_force_kernel_mode(void)
+/* emergency restore, don't bother with error reporting */
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
-       bool ret, error = false;
        struct drm_fb_helper *helper;
 
-       if (list_empty(&kernel_fb_helper_list))
-               return false;
-
+       mutex_lock(&kernel_fb_helper_lock);
        list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
                struct drm_device *dev = helper->dev;
 
@@ -300,22 +294,12 @@ static bool drm_fb_helper_force_kernel_mode(void)
                        continue;
 
                mutex_lock(&helper->lock);
-               ret = drm_client_modeset_commit_locked(&helper->client);
-               if (ret)
-                       error = true;
+               drm_client_modeset_commit_locked(&helper->client);
                mutex_unlock(&helper->lock);
        }
-       return error;
+       mutex_unlock(&kernel_fb_helper_lock);
 }
 
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
-       bool ret;
-
-       ret = drm_fb_helper_force_kernel_mode();
-       if (ret == true)
-               DRM_ERROR("Failed to restore crtc configuration\n");
-}
 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
 
 static void drm_fb_helper_sysrq(int dummy1)
index 722c7ebe4e88930214f3c06a3fcc73aad3898afe..03262472059ca29cad90e8191a49b18dcaa04543 100644 (file)
@@ -202,6 +202,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
                { .format = DRM_FORMAT_XBGR16161616F,   .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
                { .format = DRM_FORMAT_ARGB16161616F,   .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_ABGR16161616F,   .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+               { .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_RGB888_A8,       .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_BGR888_A8,       .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_XRGB8888_A8,     .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
index 69c2c079d8036bcde1d17b717c50ea5168cea2a1..d586068f55091242c081650fd86679e70f26fac5 100644 (file)
@@ -247,12 +247,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
-       struct drm_device *dev = obj->dev;
 
-       if (obj->funcs && obj->funcs->close)
+       if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);
-       else if (dev->driver->gem_close_object)
-               dev->driver->gem_close_object(obj, file_priv);
 
        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);
@@ -403,14 +400,10 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
        if (ret)
                goto err_remove;
 
-       if (obj->funcs && obj->funcs->open) {
+       if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
-       } else if (dev->driver->gem_open_object) {
-               ret = dev->driver->gem_open_object(obj, file_priv);
-               if (ret)
-                       goto err_revoke;
        }
 
        *handlep = handle;
@@ -982,12 +975,11 @@ drm_gem_object_free(struct kref *kref)
 {
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
-       struct drm_device *dev = obj->dev;
 
-       if (obj->funcs)
-               obj->funcs->free(obj);
-       else if (dev->driver->gem_free_object_unlocked)
-               dev->driver->gem_free_object_unlocked(obj);
+       if (WARN_ON(!obj->funcs->free))
+               return;
+
+       obj->funcs->free(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
@@ -1049,9 +1041,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * @obj_size: the object size to be mapped, in bytes
  * @vma: VMA for the area to be mapped
  *
- * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
- * provided by the driver. Depending on their requirements, drivers can either
- * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * Set up the VMA to prepare mapping of the GEM object using the GEM object's
+ * vm_ops. Depending on their requirements, GEM objects can either
+ * provide a fault handler in their vm_ops (in which case any accesses to
  * the object will be trapped, to perform migration, GTT binding, surface
  * register allocation, or performance monitoring), or mmap the buffer memory
  * synchronously after calling drm_gem_mmap_obj.
@@ -1065,12 +1057,11 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * callers must verify access restrictions before calling this helper.
  *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
- * size, or if no gem_vm_ops are provided.
+ * size, or if no vm_ops are provided.
  */
 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
 {
-       struct drm_device *dev = obj->dev;
        int ret;
 
        /* Check for valid size. */
@@ -1087,7 +1078,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 
        vma->vm_private_data = obj;
 
-       if (obj->funcs && obj->funcs->mmap) {
+       if (obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret) {
                        drm_gem_object_put(obj);
@@ -1095,10 +1086,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                }
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
-               if (obj->funcs && obj->funcs->vm_ops)
+               if (obj->funcs->vm_ops)
                        vma->vm_ops = obj->funcs->vm_ops;
-               else if (dev->driver->gem_vm_ops)
-                       vma->vm_ops = dev->driver->gem_vm_ops;
                else {
                        drm_gem_object_put(obj);
                        return -EINVAL;
@@ -1198,36 +1187,30 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");
 
-       if (obj->funcs && obj->funcs->print_info)
+       if (obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
 }
 
 int drm_gem_pin(struct drm_gem_object *obj)
 {
-       if (obj->funcs && obj->funcs->pin)
+       if (obj->funcs->pin)
                return obj->funcs->pin(obj);
-       else if (obj->dev->driver->gem_prime_pin)
-               return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
 }
 
 void drm_gem_unpin(struct drm_gem_object *obj)
 {
-       if (obj->funcs && obj->funcs->unpin)
+       if (obj->funcs->unpin)
                obj->funcs->unpin(obj);
-       else if (obj->dev->driver->gem_prime_unpin)
-               obj->dev->driver->gem_prime_unpin(obj);
 }
 
 void *drm_gem_vmap(struct drm_gem_object *obj)
 {
        void *vaddr;
 
-       if (obj->funcs && obj->funcs->vmap)
+       if (obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
-       else if (obj->dev->driver->gem_prime_vmap)
-               vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);
 
@@ -1242,10 +1225,8 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
        if (!vaddr)
                return;
 
-       if (obj->funcs && obj->funcs->vunmap)
+       if (obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
-       else if (obj->dev->driver->gem_prime_vunmap)
-               obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }
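
With these fallbacks gone, obj->funcs must be set before the object is first used; drivers set it at creation time (the etnaviv conversion further below does exactly this). A minimal hedged sketch of the pattern, with hypothetical names:

static void sketch_gem_free(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);
	kfree(obj);
}

static const struct drm_gem_object_funcs sketch_gem_funcs = {
	.free = sketch_gem_free,	/* .free is now effectively mandatory */
};

static struct drm_gem_object *sketch_gem_create(struct drm_device *dev,
						size_t size)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->funcs = &sketch_gem_funcs;	/* set before first use */
	ret = drm_gem_object_init(dev, obj, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}
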
 
 /**
index 59b9ca207b42dd374f8c5233995f05433d12a46f..2165633c9b9e84a861f73b01be808854632ac27d 100644 (file)
@@ -171,17 +171,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
  * GEM object state and frees the memory used to store the object itself.
  * If the buffer is imported and the virtual address is set, it is released.
  * Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object_unlocked callback.
+ * &drm_gem_object_funcs.free callback.
  */
 void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
-       struct drm_gem_cma_object *cma_obj;
-
-       cma_obj = to_drm_gem_cma_obj(gem_obj);
+       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
 
        if (gem_obj->import_attach) {
                if (cma_obj->vaddr)
-                       dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
+                       dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        } else if (cma_obj->vaddr) {
                dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
@@ -419,7 +418,7 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
  *
  * This function exports a scatter/gather table suitable for PRIME usage by
  * calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their &drm_driver.gem_prime_get_sg_table callback.
+ * set this as their &drm_gem_object_funcs.get_sg_table callback.
  *
  * Returns:
  * A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@ -525,7 +524,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
  * virtual address space. Since the CMA buffers are already mapped into the
  * kernel virtual address space this simply returns the cached virtual
  * address. Drivers using the CMA helpers should set this as their DRM
- * driver's &drm_driver.gem_prime_vmap callback.
+ * driver's &drm_gem_object_funcs.vmap callback.
  *
  * Returns:
  * The kernel virtual address of the CMA GEM object's backing store.
@@ -547,7 +546,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
  * This function removes a buffer exported via DRM PRIME from the kernel's
  * virtual address space. This is a no-op because CMA buffers cannot be
  * unmapped from kernel space. Drivers using the CMA helpers should set this
- * as their &drm_driver.gem_prime_vunmap callback.
+ * as their &drm_gem_object_funcs.vunmap callback.
  */
 void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
@@ -617,22 +616,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
 {
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *obj;
-       void *vaddr;
+       struct dma_buf_map map;
+       int ret;
 
-       vaddr = dma_buf_vmap(attach->dmabuf);
-       if (!vaddr) {
+       ret = dma_buf_vmap(attach->dmabuf, &map);
+       if (ret) {
                DRM_ERROR("Failed to vmap PRIME buffer\n");
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(ret);
        }
 
        obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
-               dma_buf_vunmap(attach->dmabuf, vaddr);
+               dma_buf_vunmap(attach->dmabuf, &map);
                return obj;
        }
 
        cma_obj = to_drm_gem_cma_obj(obj);
-       cma_obj->vaddr = vaddr;
+       cma_obj->vaddr = map.vaddr;
 
        return obj;
 }
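
The conversion above reflects the new dma_buf_vmap()/dma_buf_vunmap() convention: the mapping is returned through a struct dma_buf_map rather than a bare pointer. In isolation, the calling pattern looks like this (hedged sketch, assuming a system-memory buffer):

static int sketch_clear_imported_buffer(struct dma_buf *dmabuf)
{
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);	/* fills map on success */
	if (ret)
		return ret;

	/*
	 * map.is_iomem distinguishes I/O-memory mappings, which must not
	 * be dereferenced directly; plain system memory is assumed here.
	 */
	memset(map.vaddr, 0, dmabuf->size);

	dma_buf_vunmap(dmabuf, &map);		/* takes the same map */

	return 0;
}
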
index e00616d94f26ef9b21584fd2a0f8311b5a714d32..8233bda4692f71dab0a5fc85f3e3cce68ec427d6 100644 (file)
@@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
 static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
-       int ret;
+       struct dma_buf_map map;
+       int ret = 0;
 
        if (shmem->vmap_use_count++ > 0)
                return shmem->vaddr;
 
        if (obj->import_attach) {
-               shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+               ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+               if (!ret)
+                       shmem->vaddr = map.vaddr;
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
@@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
                        prot = pgprot_writecombine(prot);
                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, prot);
+               if (!shmem->vaddr)
+                       ret = -ENOMEM;
        }
 
-       if (!shmem->vaddr) {
-               DRM_DEBUG_KMS("Failed to vmap pages\n");
-               ret = -ENOMEM;
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
                goto err_put_pages;
        }
 
@@ -333,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap);
 static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
 
        if (WARN_ON_ONCE(!shmem->vmap_use_count))
                return;
@@ -341,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
                return;
 
        if (obj->import_attach)
-               dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+               dma_buf_vunmap(obj->import_attach->dmabuf, &map);
        else
                vunmap(shmem->vaddr);
 
index 50cad0e4a92e381677a181be89d2155c05fc4d4f..16d68c04ea5deaed2c8292d054b52c5eaf6b065d 100644 (file)
@@ -15,7 +15,6 @@
 #include <drm/drm_plane.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_simple_kms_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
@@ -140,22 +139,19 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
        unsigned int c = 0;
 
        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
-               pl_flag = TTM_PL_FLAG_TOPDOWN;
+               invariant_flags = TTM_PL_FLAG_TOPDOWN;
 
        gbo->placement.placement = gbo->placements;
        gbo->placement.busy_placement = gbo->placements;
 
        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
                gbo->placements[c].mem_type = TTM_PL_VRAM;
-               gbo->placements[c++].flags = TTM_PL_FLAG_WC |
-                                            TTM_PL_FLAG_UNCACHED |
-                                            invariant_flags;
+               gbo->placements[c++].flags = invariant_flags;
        }
 
        if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
                gbo->placements[c].mem_type = TTM_PL_SYSTEM;
-               gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-                                            invariant_flags;
+               gbo->placements[c++].flags = invariant_flags;
        }
 
        gbo->placement.num_placement = c;
@@ -167,58 +163,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
        }
 }
 
-/*
- * Note that on error, drm_gem_vram_init will free the buffer object.
- */
-
-static int drm_gem_vram_init(struct drm_device *dev,
-                            struct drm_gem_vram_object *gbo,
-                            size_t size, unsigned long pg_align)
-{
-       struct drm_vram_mm *vmm = dev->vram_mm;
-       struct ttm_bo_device *bdev;
-       int ret;
-       size_t acc_size;
-
-       if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
-               kfree(gbo);
-               return -EINVAL;
-       }
-       bdev = &vmm->bdev;
-
-       gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
-
-       ret = drm_gem_object_init(dev, &gbo->bo.base, size);
-       if (ret) {
-               kfree(gbo);
-               return ret;
-       }
-
-       acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
-
-       gbo->bo.bdev = bdev;
-       drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
-                              DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-
-       ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
-                         &gbo->placement, pg_align, false, acc_size,
-                         NULL, NULL, ttm_buffer_object_destroy);
-       if (ret)
-               /*
-                * A failing ttm_bo_init will call ttm_buffer_object_destroy
-                * to release gbo->bo.base and kfree gbo.
-                */
-               return ret;
-
-       return 0;
-}
-
 /**
  * drm_gem_vram_create() - Creates a VRAM-backed GEM object
  * @dev:               the DRM device
  * @size:              the buffer size in bytes
  * @pg_align:          the buffer's alignment in multiples of the page size
  *
+ * GEM objects are allocated by calling struct drm_driver.gem_create_object,
+ * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
+ * object functions from within their &drm_driver.gem_create_object
+ * implementation. If no functions are set, the new GEM object will use the
+ * default functions from the GEM VRAM helpers.
+ *
  * Returns:
  * A new instance of &struct drm_gem_vram_object on success, or
  * an ERR_PTR()-encoded error code otherwise.
@@ -228,11 +184,17 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
                                                unsigned long pg_align)
 {
        struct drm_gem_vram_object *gbo;
+       struct drm_gem_object *gem;
+       struct drm_vram_mm *vmm = dev->vram_mm;
+       struct ttm_bo_device *bdev;
        int ret;
+       size_t acc_size;
+
+       if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+               return ERR_PTR(-EINVAL);
 
        if (dev->driver->gem_create_object) {
-               struct drm_gem_object *gem =
-                       dev->driver->gem_create_object(dev, size);
+               gem = dev->driver->gem_create_object(dev, size);
                if (!gem)
                        return ERR_PTR(-ENOMEM);
                gbo = drm_gem_vram_of_gem(gem);
@@ -240,10 +202,32 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
                gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
                if (!gbo)
                        return ERR_PTR(-ENOMEM);
+               gem = &gbo->bo.base;
        }
 
-       ret = drm_gem_vram_init(dev, gbo, size, pg_align);
-       if (ret < 0)
+       if (!gem->funcs)
+               gem->funcs = &drm_gem_vram_object_funcs;
+
+       ret = drm_gem_object_init(dev, gem, size);
+       if (ret) {
+               kfree(gbo);
+               return ERR_PTR(ret);
+       }
+
+       bdev = &vmm->bdev;
+       acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+
+       gbo->bo.bdev = bdev;
+       drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
+
+       /*
+        * A failing ttm_bo_init will call ttm_buffer_object_destroy
+        * to release gbo->bo.base and kfree gbo.
+        */
+       ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
+                         &gbo->placement, pg_align, false, acc_size,
+                         NULL, NULL, ttm_buffer_object_destroy);
+       if (ret)
                return ERR_PTR(ret);
 
        return gbo;
@@ -301,7 +285,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
  */
 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
 {
-       if (WARN_ON_ONCE(!gbo->pin_count))
+       if (WARN_ON_ONCE(!gbo->bo.pin_count))
                return (s64)-ENODEV;
        return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
 }
@@ -310,24 +294,21 @@ EXPORT_SYMBOL(drm_gem_vram_offset);
 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
                                   unsigned long pl_flag)
 {
-       int i, ret;
        struct ttm_operation_ctx ctx = { false, false };
+       int ret;
 
-       if (gbo->pin_count)
+       if (gbo->bo.pin_count)
                goto out;
 
        if (pl_flag)
                drm_gem_vram_placement(gbo, pl_flag);
 
-       for (i = 0; i < gbo->placement.num_placement; ++i)
-               gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
        ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
        if (ret < 0)
                return ret;
 
 out:
-       ++gbo->pin_count;
+       ttm_bo_pin(&gbo->bo);
 
        return 0;
 }
@@ -369,26 +350,9 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
 }
 EXPORT_SYMBOL(drm_gem_vram_pin);
 
-static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
 {
-       int i, ret;
-       struct ttm_operation_ctx ctx = { false, false };
-
-       if (WARN_ON_ONCE(!gbo->pin_count))
-               return 0;
-
-       --gbo->pin_count;
-       if (gbo->pin_count)
-               return 0;
-
-       for (i = 0; i < gbo->placement.num_placement ; ++i)
-               gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-
-       ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
-       if (ret < 0)
-               return ret;
-
-       return 0;
+       ttm_bo_unpin(&gbo->bo);
 }
 
 /**
@@ -406,10 +370,11 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
        ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
        if (ret)
                return ret;
-       ret = drm_gem_vram_unpin_locked(gbo);
+
+       drm_gem_vram_unpin_locked(gbo);
        ttm_bo_unreserve(&gbo->bo);
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(drm_gem_vram_unpin);
 
@@ -619,6 +584,23 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
        kmap->virtual = NULL;
 }
 
+static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
+                                      bool evict,
+                                      struct ttm_operation_ctx *ctx,
+                                      struct ttm_resource *new_mem)
+{
+       int ret;
+
+       drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+       ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+       if (ret) {
+               swap(*new_mem, gbo->bo.mem);
+               drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
+               swap(*new_mem, gbo->bo.mem);
+       }
+       return ret;
+}
+
 /*
  * Helpers for struct drm_gem_object_funcs
  */
@@ -941,7 +923,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
        if (!tt)
                return NULL;
 
-       ret = ttm_tt_init(tt, bo, page_flags);
+       ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
        if (ret < 0)
                goto err_ttm_tt_init;
 
@@ -966,9 +948,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
        drm_gem_vram_bo_driver_evict_flags(gbo, placement);
 }
 
-static void bo_driver_move_notify(struct ttm_buffer_object *bo,
-                                 bool evict,
-                                 struct ttm_resource *new_mem)
+static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
 {
        struct drm_gem_vram_object *gbo;
 
@@ -978,7 +958,19 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
 
        gbo = drm_gem_vram_of_bo(bo);
 
-       drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+       drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
+}
+
+static int bo_driver_move(struct ttm_buffer_object *bo,
+                         bool evict,
+                         struct ttm_operation_ctx *ctx,
+                         struct ttm_resource *new_mem)
+{
+       struct drm_gem_vram_object *gbo;
+
+       gbo = drm_gem_vram_of_bo(bo);
+
+       return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
 }
 
 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -992,6 +984,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
@@ -1005,7 +998,8 @@ static struct ttm_bo_driver bo_driver = {
        .ttm_tt_destroy = bo_driver_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = bo_driver_evict_flags,
-       .move_notify = bo_driver_move_notify,
+       .move = bo_driver_move,
+       .delete_mem_notify = bo_driver_delete_mem_notify,
        .io_mem_reserve = bo_driver_io_mem_reserve,
 };
 
@@ -1050,10 +1044,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
        vmm->vram_base = vram_base;
        vmm->vram_size = vram_size;
 
-       ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+       ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
                                 dev->anon_inode->i_mapping,
                                 dev->vma_offset_manager,
-                                true);
+                                false, true);
        if (ret)
                return ret;
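
Pinning now delegates to TTM's own pin count (ttm_bo_pin()/ttm_bo_unpin() and bo.pin_count in the hunks above), and both calls require the BO to be reserved. A hedged sketch of the locking pattern:

static int sketch_pin_bo(struct ttm_buffer_object *bo)
{
	int ret;

	/* interruptible = true, no_wait = false, no ww acquire ticket */
	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	ttm_bo_pin(bo);		/* increments bo->pin_count */
	ttm_bo_unreserve(bo);

	return 0;
}
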
 
index b65865c630b0aae2830a30f1e6f855584ced85d5..2bdac35577659ed0992d4e71c550ef0dd0ba2a42 100644 (file)
@@ -53,15 +53,15 @@ void drm_lastclose(struct drm_device *dev);
 #ifdef CONFIG_PCI
 
 /* drm_pci.c */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv);
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
 void drm_pci_agp_destroy(struct drm_device *dev);
 int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
 
 #else
 
-static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
-                                  struct drm_file *file_priv)
+static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+                                         struct drm_file *file_priv)
 {
        return -EINVAL;
 }
index 789ee65ac1f572529b8df474349afdea500cfe6b..d273d1a8603a9592b22c7cc3fe23eecc2e0959c7 100644 (file)
@@ -578,7 +578,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
 
index c250fb5a88ca84b5e8e7e8d16149a2521b0106b5..6dba4b8ce4fe2fd438f4d09ff870151a5e7255ec 100644 (file)
@@ -139,7 +139,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
 }
 
 /**
- * drm_irq_by_busid - Get interrupt from bus ID
+ * drm_legacy_irq_by_busid - Get interrupt from bus ID
  * @dev: DRM device
  * @data: IOCTL parameter pointing to a drm_irq_busid structure
  * @file_priv: DRM file private.
@@ -150,8 +150,8 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
  *
  * Return: 0 on success or a negative error code on failure.
  */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
 {
        struct drm_irq_busid *p = data;
 
index 9f955f2010c25770f53110a2ff92ba98d0caadf4..187b55ede62ec4a1b665e351aa45863a98652752 100644 (file)
@@ -386,8 +386,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
 
        if (obj->funcs && obj->funcs->export)
                dmabuf = obj->funcs->export(obj, flags);
-       else if (dev->driver->gem_prime_export)
-               dmabuf = dev->driver->gem_prime_export(obj, flags);
        else
                dmabuf = drm_gem_prime_export(obj, flags);
        if (IS_ERR(dmabuf)) {
@@ -419,7 +417,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
  * This is the PRIME export function which must be used mandatorily by GEM
  * drivers to ensure correct lifetime management of the underlying GEM object.
  * The actual exporting from GEM object to a dma-buf is done through the
- * &drm_driver.gem_prime_export driver callback.
+ * &drm_gem_object_funcs.export callback.
  */
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
@@ -622,10 +620,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
        if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);
 
-       if (obj->funcs)
-               sgt = obj->funcs->get_sg_table(obj);
-       else
-               sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+       if (WARN_ON(!obj->funcs->get_sg_table))
+               return ERR_PTR(-ENOSYS);
+
+       sgt = obj->funcs->get_sg_table(obj);
+       if (IS_ERR(sgt))
+               return sgt;
 
        ret = dma_map_sgtable(attach->dev, sgt, dir,
                              DMA_ATTR_SKIP_CPU_SYNC);
@@ -663,38 +663,41 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 /**
  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
  * @dma_buf: buffer to be mapped
+ * @map: the virtual address of the buffer
  *
  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
  *
 * Returns 0 on success or a negative errno code otherwise.
  */
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct drm_gem_object *obj = dma_buf->priv;
        void *vaddr;
 
        vaddr = drm_gem_vmap(obj);
        if (IS_ERR(vaddr))
-               vaddr = NULL;
+               return PTR_ERR(vaddr);
 
-       return vaddr;
+       dma_buf_map_set_vaddr(map, vaddr);
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 
 /**
  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
  * @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
+ * @map: the virtual address of the buffer
  *
  * Releases a kernel virtual mapping. This can be used as the
  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
  */
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct drm_gem_object *obj = dma_buf->priv;
 
-       drm_gem_vunmap(obj, vaddr);
+       drm_gem_vunmap(obj, map->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
index 74946690aba4e25b8704dee2e5292ad50f95eb5b..fa87b63e152a8f76cbd8173f5adba939e010d26f 100644 (file)
@@ -99,7 +99,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
 }
 
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
        struct drm_plane *plane;
        struct drm_simple_display_pipe *pipe;
@@ -113,7 +113,7 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
 }
 
 static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
        struct drm_simple_display_pipe *pipe;
 
index a9a3afaef9a1cb7b512f87412956a5cfb248db6e..aa270b79e58558a79358d11937791b8e96c998c8 100644 (file)
@@ -468,12 +468,6 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
        ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
 };
 
-static const struct vm_operations_struct vm_ops = {
-       .fault = etnaviv_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
@@ -490,16 +484,9 @@ static struct drm_driver etnaviv_drm_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
        .open               = etnaviv_open,
        .postclose           = etnaviv_postclose,
-       .gem_free_object_unlocked = etnaviv_gem_free_object,
-       .gem_vm_ops         = &vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_pin      = etnaviv_gem_prime_pin,
-       .gem_prime_unpin    = etnaviv_gem_prime_unpin,
-       .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
-       .gem_prime_vmap     = etnaviv_gem_prime_vmap,
-       .gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
        .gem_prime_mmap     = etnaviv_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = etnaviv_debugfs_init,
index 4d8dc9236e5fd6bdf9dd361343ce7a2b74ecbd6a..914f0867ff71f8b42650e9ba0e13a56bf8598fd0 100644 (file)
@@ -49,7 +49,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
index d1533bdc1335ed6b29d4affe5a807e2bc8a495ef..67d9a2b9ea6a133610b5748d6d825fca33e59e34 100644 (file)
@@ -171,7 +171,7 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return obj->ops->mmap(obj, vma);
 }
 
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
+static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@ -559,6 +559,22 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
        mutex_unlock(&priv->gem_lock);
 }
 
+static const struct vm_operations_struct vm_ops = {
+       .fault = etnaviv_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
+       .free = etnaviv_gem_free_object,
+       .pin = etnaviv_gem_prime_pin,
+       .unpin = etnaviv_gem_prime_unpin,
+       .get_sg_table = etnaviv_gem_prime_get_sg_table,
+       .vmap = etnaviv_gem_prime_vmap,
+       .vunmap = etnaviv_gem_prime_vunmap,
+       .vm_ops = &vm_ops,
+};
+
 static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
 {
@@ -593,6 +609,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);
 
        *obj = &etnaviv_obj->base;
+       (*obj)->funcs = &etnaviv_gem_object_funcs;
 
        return 0;
 }
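
The driver conversions below all follow the same pattern: the GEM callbacks
move out of struct drm_driver into a constant struct drm_gem_object_funcs
attached to each object at creation, so that core code (such as the
drm_prime hunk at the top of this diff) dispatches through obj->funcs. A
condensed sketch, with hypothetical foo_* names standing in for a driver:

        static const struct drm_gem_object_funcs foo_gem_object_funcs = {
                .free = foo_gem_free_object,
                .get_sg_table = foo_gem_prime_get_sg_table,
                .vmap = foo_gem_prime_vmap,
                .vunmap = foo_gem_prime_vunmap,
                .vm_ops = &foo_gem_vm_ops,
        };

        /* at object creation time */
        obj->funcs = &foo_gem_object_funcs;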
index 4aa3426a9ba4beea8863d7ed07bb24da388e7d0f..135fbff6fecf4d2c83d0141b584608336471353d 100644 (file)
@@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
 
 static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 {
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
+
        if (etnaviv_obj->vaddr)
-               dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
-                              etnaviv_obj->vaddr);
+               dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
 
        /* Don't drop the pages for imported dmabuf, as they are not
         * ours, just free the array we allocated:
@@ -85,9 +86,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 
 static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 {
+       struct dma_buf_map map;
+       int ret;
+
        lockdep_assert_held(&etnaviv_obj->lock);
 
-       return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+       ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+       if (ret)
+               return NULL;
+       return map.vaddr;
 }
 
 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
index 1c03485676efa33e04376a42480a4a2e1a9aec88..35f1d1dbb1262e584bb657bcb2c000b68a26b2be 100644 (file)
@@ -19,7 +19,7 @@
 #include "exynos_drm_plane.h"
 
 static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                         struct drm_crtc_state *old_state)
+                                         struct drm_atomic_state *state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
@@ -30,7 +30,7 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
index dbd80f1e4c78bd44118acdc20449f7a4f82bd624..fe46680ca2082c732feba22b4575d6262508711c 100644 (file)
@@ -75,11 +75,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
        file->driver_priv = NULL;
 }
 
-static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct drm_ioctl_desc exynos_ioctls[] = {
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
                        DRM_RENDER_ALLOW),
@@ -124,16 +119,11 @@ static struct drm_driver exynos_drm_driver = {
        .open                   = exynos_drm_open,
        .lastclose              = drm_fb_helper_lastclose,
        .postclose              = exynos_drm_postclose,
-       .gem_free_object_unlocked = exynos_drm_gem_free_object,
-       .gem_vm_ops             = &exynos_drm_gem_vm_ops,
        .dumb_create            = exynos_drm_gem_dumb_create,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import       = exynos_drm_gem_prime_import,
-       .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table      = exynos_drm_gem_prime_import_sg_table,
-       .gem_prime_vmap         = exynos_drm_gem_prime_vmap,
-       .gem_prime_vunmap       = exynos_drm_gem_prime_vunmap,
        .gem_prime_mmap         = exynos_drm_gem_prime_mmap,
        .ioctls                 = exynos_ioctls,
        .num_ioctls             = ARRAY_SIZE(exynos_ioctls),
index 7777f19c9d38ff879434140ff44ce44f45c448db..4afbf5109cbff11f43ea0ccbe826880dc959289d 100644 (file)
@@ -127,6 +127,19 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
        kfree(exynos_gem);
 }
 
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
+       .free = exynos_drm_gem_free_object,
+       .get_sg_table = exynos_drm_gem_prime_get_sg_table,
+       .vmap = exynos_drm_gem_prime_vmap,
+       .vunmap = exynos_drm_gem_prime_vunmap,
+       .vm_ops = &exynos_drm_gem_vm_ops,
+};
+
 static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                                                  unsigned long size)
 {
@@ -141,6 +154,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
        exynos_gem->size = size;
        obj = &exynos_gem->base;
 
+       obj->funcs = &exynos_drm_gem_object_funcs;
+
        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
index b9ca81a6f80fb324eda22eb0e858728a279245bb..7a9e89cfdf9cad8ad7f9d0a522868542c43825c3 100644 (file)
@@ -43,8 +43,10 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_crtc_state)
+                                       struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct drm_device *dev = crtc->dev;
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
@@ -62,7 +64,7 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
        struct drm_device *dev = crtc->dev;
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
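
The atomic_enable/atomic_disable hooks throughout this series switch from
receiving the old &drm_crtc_state to receiving the full &drm_atomic_state;
hooks that still need the old CRTC state now look it up explicitly, as
fsl-dcu does above. The conversion recipe, sketched for a hypothetical foo_
driver:

        static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
                                            struct drm_atomic_state *state)
        {
                struct drm_crtc_state *old_crtc_state =
                        drm_atomic_get_old_crtc_state(state, crtc);

                /* body unchanged, still operates on old_crtc_state */
        }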
index 720a767118c9cd31acd3cda5116e605862d7a74e..bfd9a15d63b1a25a33abe256fe976478d9e4a294 100644 (file)
@@ -1501,8 +1501,7 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
        clock_recovery = false;
 
        DRM_DEBUG_KMS("Start train\n");
-               reg = DP | DP_LINK_TRAIN_PAT_1;
-
+       reg = DP | DP_LINK_TRAIN_PAT_1;
 
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
@@ -1575,7 +1574,7 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
        cr_tries = 0;
 
        DRM_DEBUG_KMS("\n");
-               reg = DP | DP_LINK_TRAIN_PAT_2;
+       reg = DP | DP_LINK_TRAIN_PAT_2;
 
        for (;;) {
 
@@ -2083,7 +2082,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
                        DRM_INFO("failed to retrieve link info, disabling eDP\n");
                        drm_encoder_cleanup(encoder);
                        cdv_intel_dp_destroy(connector);
-                       goto err_priv;
+                       goto err_connector;
                } else {
                        DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
                                intel_dp->dpcd[0], intel_dp->dpcd[1], 
index 54d9876b5305a475de62e4b219c77437787809b1..5ede24fb44aeff1b8227424a15aff5c7aff919bb 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 
 #include "framebuffer.h"
+#include "gem.h"
 #include "gtt.h"
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
@@ -285,6 +286,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
        /* Begin by trying to use stolen memory backing */
        backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
        if (backing) {
+               backing->gem.funcs = &psb_gem_object_funcs;
                drm_gem_private_object_init(dev, &backing->gem, aligned_size);
                return backing;
        }
index f9c4b1d76f566050bccb44dc6bcf80865dc7d1fd..8f07de83b6fb24ac05ccc93faa6578b691409cc7 100644 (file)
@@ -18,7 +18,9 @@
 
 #include "psb_drv.h"
 
-void psb_gem_free_object(struct drm_gem_object *obj)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
+
+static void psb_gem_free_object(struct drm_gem_object *obj)
 {
        struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
 
@@ -36,6 +38,17 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
        return -EINVAL;
 }
 
+static const struct vm_operations_struct psb_gem_vm_ops = {
+       .fault = psb_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+const struct drm_gem_object_funcs psb_gem_object_funcs = {
+       .free = psb_gem_free_object,
+       .vm_ops = &psb_gem_vm_ops,
+};
+
 /**
  *     psb_gem_create          -       create a mappable object
  *     @file: the DRM file of the client
@@ -63,6 +76,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
                dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
                return -ENOSPC;
        }
+       r->gem.funcs = &psb_gem_object_funcs;
        /* Initialize the extra goodies GEM needs to do all the hard work */
        if (drm_gem_object_init(dev, &r->gem, size) != 0) {
                psb_gtt_free_range(dev, r);
@@ -123,7 +137,7 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  *     vma->vm_private_data points to the GEM object that is backing this
  *     mapping.
  */
-vm_fault_t psb_gem_fault(struct vm_fault *vmf)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj;
index 4a74dc623b6bb9b67f69120e76120d39f89477e3..3741a711b9fdecc57e8d2df255e63072ef03187d 100644 (file)
@@ -8,6 +8,9 @@
 #ifndef _GEM_H
 #define _GEM_H
 
+extern const struct drm_gem_object_funcs psb_gem_object_funcs;
+
 extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
                          u64 size, u32 *handlep, int stolen, u32 align);
+
 #endif
index 34b4aae9a15e3ee94de91020a67f43fe91b195f3..b13376a6fb91d5ec1416ab8ba6efd00a4f533cfd 100644 (file)
@@ -480,12 +480,6 @@ static const struct dev_pm_ops psb_pm_ops = {
        .runtime_idle = psb_runtime_idle,
 };
 
-static const struct vm_operations_struct psb_gem_vm_ops = {
-       .fault = psb_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations psb_gem_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@ -507,9 +501,6 @@ static struct drm_driver driver = {
        .irq_uninstall = psb_irq_uninstall,
        .irq_handler = psb_irq_handler,
 
-       .gem_free_object_unlocked = psb_gem_free_object,
-       .gem_vm_ops = &psb_gem_vm_ops,
-
        .dumb_create = psb_gem_dumb_create,
        .ioctls = psb_ioctls,
        .fops = &psb_gem_fops,
index 9569263413164f861724bc378eb6b728a294baec..c71a5a4e912ce94a7660e6cf477804a4184613b7 100644 (file)
@@ -735,12 +735,10 @@ extern const struct drm_connector_helper_funcs
 extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
 
 /* gem.c */
-extern void psb_gem_free_object(struct drm_gem_object *obj);
 extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
                        struct drm_file *file);
 extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
-extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
 
 /* psb_device.c */
 extern const struct psb_ops psb_chip_ops;
index f991327155976b7ac19a2f092d3e5e160dc563cc..684ef794eb7cbe57cfed783013e91c86d2b8d0c8 100644 (file)
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
 
 obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
index 4d57ec688f827a161e674837fcbe1b1dc929d33e..ef18b47871950e5a6acbe221d66999307e289cf3 100644 (file)
 #include "hibmc_drm_regs.h"
 
 struct hibmc_display_panel_pll {
-       unsigned long M;
-       unsigned long N;
-       unsigned long OD;
-       unsigned long POD;
+       u64 M;
+       u64 N;
+       u64 OD;
+       u64 POD;
 };
 
 struct hibmc_dislay_pll_config {
-       unsigned long hdisplay;
-       unsigned long vdisplay;
+       u64 hdisplay;
+       u64 vdisplay;
        u32 pll1_config_value;
        u32 pll2_config_value;
 };
@@ -52,8 +52,6 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
        {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
 };
 
-#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
-
 static int hibmc_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
@@ -104,8 +102,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
        struct drm_plane_state  *state  = plane->state;
        u32 reg;
        s64 gpu_addr = 0;
-       unsigned int line_l;
-       struct hibmc_drm_private *priv = plane->dev->dev_private;
+       u32 line_l;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
        struct drm_gem_vram_object *gbo;
 
        if (!state->fb)
@@ -141,7 +139,7 @@ static const u32 channel_formats1[] = {
        DRM_FORMAT_ABGR8888
 };
 
-static struct drm_plane_funcs hibmc_plane_funcs = {
+static const struct drm_plane_funcs hibmc_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy = drm_plane_cleanup,
@@ -157,10 +155,10 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
        .atomic_update = hibmc_plane_atomic_update,
 };
 
-static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms)
 {
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
-       unsigned int reg;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
+       u32 reg;
 
        reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
        reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
@@ -172,10 +170,10 @@ static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
 }
 
 static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
-       unsigned int reg;
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
+       u32 reg;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
 
        hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
 
@@ -191,10 +189,10 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
-       unsigned int reg;
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
+       u32 reg;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
 
        hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
        drm_crtc_vblank_off(crtc);
@@ -214,7 +212,7 @@ static enum drm_mode_status
 hibmc_crtc_mode_valid(struct drm_crtc *crtc,
                      const struct drm_display_mode *mode)
 {
-       int i = 0;
+       size_t i = 0;
        int vrefresh = drm_mode_vrefresh(mode);
 
        if (vrefresh < 59 || vrefresh > 61)
@@ -229,9 +227,9 @@ hibmc_crtc_mode_valid(struct drm_crtc *crtc,
        return MODE_BAD;
 }
 
-static unsigned int format_pll_reg(void)
+static u32 format_pll_reg(void)
 {
-       unsigned int pllreg = 0;
+       u32 pllreg = 0;
        struct hibmc_display_panel_pll pll = {0};
 
        /*
@@ -251,10 +249,10 @@ static unsigned int format_pll_reg(void)
        return pllreg;
 }
 
-static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+static void set_vclock_hisilicon(struct drm_device *dev, u64 pll)
 {
        u32 val;
-       struct hibmc_drm_private *priv = dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 
        val = readl(priv->mmio + CRT_PLL1_HS);
        val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
@@ -281,11 +279,10 @@ static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
        writel(val, priv->mmio + CRT_PLL1_HS);
 }
 
-static void get_pll_config(unsigned long x, unsigned long y,
-                          u32 *pll1, u32 *pll2)
+static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2)
 {
-       int i;
-       int count = ARRAY_SIZE(hibmc_pll_table);
+       size_t i;
+       size_t count = ARRAY_SIZE(hibmc_pll_table);
 
        for (i = 0; i < count; i++) {
                if (hibmc_pll_table[i].hdisplay == x &&
@@ -308,14 +305,14 @@ static void get_pll_config(unsigned long x, unsigned long y,
  * FPGA only supports 7 predefined pixel clocks, and clock select is
  * in bit 4:0 of new register 0x802a8.
  */
-static unsigned int display_ctrl_adjust(struct drm_device *dev,
-                                       struct drm_display_mode *mode,
-                                       unsigned int ctrl)
+static u32 display_ctrl_adjust(struct drm_device *dev,
+                              struct drm_display_mode *mode,
+                              u32 ctrl)
 {
-       unsigned long x, y;
+       u64 x, y;
        u32 pll1; /* bit[31:0] of PLL */
        u32 pll2; /* bit[63:32] of PLL */
-       struct hibmc_drm_private *priv = dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 
        x = mode->hdisplay;
        y = mode->vdisplay;
@@ -360,12 +357,12 @@ static unsigned int display_ctrl_adjust(struct drm_device *dev,
 
 static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-       unsigned int val;
+       u32 val;
        struct drm_display_mode *mode = &crtc->state->mode;
        struct drm_device *dev = crtc->dev;
-       struct hibmc_drm_private *priv = dev->dev_private;
-       int width = mode->hsync_end - mode->hsync_start;
-       int height = mode->vsync_end - mode->vsync_start;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+       u32 width = mode->hsync_end - mode->hsync_start;
+       u32 height = mode->vsync_end - mode->vsync_start;
 
        writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
        writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
@@ -395,9 +392,9 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
 {
-       unsigned int reg;
+       u32 reg;
        struct drm_device *dev = crtc->dev;
-       struct hibmc_drm_private *priv = dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 
        hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
 
@@ -427,7 +424,7 @@ static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
 
 static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
 {
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
 
        writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
               priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -437,7 +434,7 @@ static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
 
 static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
 {
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
 
        writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
               priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -445,18 +442,18 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
 
 static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
 {
-       struct hibmc_drm_private *priv = crtc->dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
        void __iomem   *mmio = priv->mmio;
        u16 *r, *g, *b;
-       unsigned int reg;
-       int i;
+       u32 reg;
+       u32 i;
 
        r = crtc->gamma_store;
        g = r + crtc->gamma_size;
        b = g + crtc->gamma_size;
 
        for (i = 0; i < crtc->gamma_size; i++) {
-               unsigned int offset = i << 2;
+               u32 offset = i << 2;
                u8 red = *r++ >> 8;
                u8 green = *g++ >> 8;
                u8 blue = *b++ >> 8;
index 085d1b2fa8c0a5e229f14642544e285b5f1beb56..fee6fe810e746cc559f50233e0832f658cf836a3 100644 (file)
@@ -29,8 +29,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops);
 static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *)arg;
-       struct hibmc_drm_private *priv =
-               (struct hibmc_drm_private *)dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
        u32 status;
 
        status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
@@ -122,12 +121,11 @@ static void hibmc_kms_fini(struct hibmc_drm_private *priv)
 /*
  * It can operate in one of three modes: 0, 1 or Sleep.
  */
-void hibmc_set_power_mode(struct hibmc_drm_private *priv,
-                         unsigned int power_mode)
+void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
 {
-       unsigned int control_value = 0;
+       u32 control_value = 0;
        void __iomem   *mmio = priv->mmio;
-       unsigned int input = 1;
+       u32 input = 1;
 
        if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
                return;
@@ -145,8 +143,8 @@ void hibmc_set_power_mode(struct hibmc_drm_private *priv,
 
 void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
 {
-       unsigned int gate_reg;
-       unsigned int mode;
+       u32 gate_reg;
+       u32 mode;
        void __iomem   *mmio = priv->mmio;
 
        /* Get current power mode. */
@@ -171,7 +169,7 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
 
 static void hibmc_hw_config(struct hibmc_drm_private *priv)
 {
-       unsigned int reg;
+       u32 reg;
 
        /* On hardware reset, power mode 0 is default. */
        hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -244,7 +242,7 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
 
 static int hibmc_unload(struct drm_device *dev)
 {
-       struct hibmc_drm_private *priv = dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 
        drm_atomic_helper_shutdown(dev);
 
@@ -371,7 +369,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
        drm_dev_put(dev);
 }
 
-static struct pci_device_id hibmc_pci_table[] = {
+static const struct pci_device_id hibmc_pci_table[] = {
        { PCI_VDEVICE(HUAWEI, 0x1711) },
        {0,}
 };
index 197485e2fe0b23eccbb3b72ebf9eb3397fa84521..f310a83d9c48d89c9d1160130f03c6e5bbbe4ff1 100644 (file)
 #ifndef HIBMC_DRM_DRV_H
 #define HIBMC_DRM_DRV_H
 
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 
-struct drm_device;
+struct hibmc_connector {
+       struct drm_connector base;
+
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data bit_data;
+};
 
 struct hibmc_drm_private {
        /* hw */
        void __iomem   *mmio;
        void __iomem   *fb_map;
-       unsigned long  fb_base;
-       unsigned long  fb_size;
+       resource_size_t  fb_base;
+       resource_size_t  fb_size;
 
        /* drm */
        struct drm_device  *dev;
        struct drm_plane primary_plane;
        struct drm_crtc crtc;
        struct drm_encoder encoder;
-       struct drm_connector connector;
+       struct hibmc_connector connector;
        bool mode_config_initialized;
 };
 
+static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+{
+       return container_of(connector, struct hibmc_connector, base);
+}
+
+static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+{
+       return dev->dev_private;
+}
+
 void hibmc_set_power_mode(struct hibmc_drm_private *priv,
-                         unsigned int power_mode);
+                         u32 power_mode);
 void hibmc_set_current_gate(struct hibmc_drm_private *priv,
-                           unsigned int gate);
+                           u32 gate);
 
 int hibmc_de_init(struct hibmc_drm_private *priv);
 int hibmc_vdac_init(struct hibmc_drm_private *priv);
@@ -47,6 +67,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc);
 void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
                      struct drm_mode_create_dumb *args);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
 
 extern const struct drm_mode_config_funcs hibmc_mode_funcs;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
new file mode 100644 (file)
index 0000000..86d7120
--- /dev/null
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ *      Tian Tao <tiantao6@hisilicon.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+#define GPIO_DATA              0x0802A0
+#define GPIO_DATA_DIRECTION    0x0802A4
+
+#define I2C_SCL_MASK           BIT(0)
+#define I2C_SDA_MASK           BIT(1)
+
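+/*
+ * The DDC bus is bit-banged over two GPIO lines with open-drain semantics:
+ * a line is released (and reads as 1 through the bus pull-up) by switching
+ * its GPIO to input, and driven low by writing 0 to the data register and
+ * switching the GPIO to output.
+ */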
+static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+{
+       struct hibmc_connector *hibmc_connector = data;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+       u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+       if (value) {
+               tmp_dir &= ~mask;
+               writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+       } else {
+               u32 tmp_data = readl(priv->mmio + GPIO_DATA);
+
+               tmp_data &= ~mask;
+               writel(tmp_data, priv->mmio + GPIO_DATA);
+
+               tmp_dir |= mask;
+               writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+       }
+}
+
+static int hibmc_get_i2c_signal(void *data, u32 mask)
+{
+       struct hibmc_connector *hibmc_connector = data;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+       u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+       if ((tmp_dir & mask) != mask) {
+               tmp_dir &= ~mask;
+               writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+       }
+
+       return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
+}
+
+static void hibmc_ddc_setsda(void *data, int state)
+{
+       hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
+}
+
+static void hibmc_ddc_setscl(void *data, int state)
+{
+       hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
+}
+
+static int hibmc_ddc_getsda(void *data)
+{
+       return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
+}
+
+static int hibmc_ddc_getscl(void *data)
+{
+       return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+}
+
+int hibmc_ddc_create(struct drm_device *drm_dev,
+                    struct hibmc_connector *connector)
+{
+       connector->adapter.owner = THIS_MODULE;
+       connector->adapter.class = I2C_CLASS_DDC;
+       snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+       connector->adapter.dev.parent = &drm_dev->pdev->dev;
+       i2c_set_adapdata(&connector->adapter, connector);
+       connector->adapter.algo_data = &connector->bit_data;
+
+       connector->bit_data.udelay = 20;
+       connector->bit_data.timeout = usecs_to_jiffies(2000);
+       connector->bit_data.data = connector;
+       connector->bit_data.setsda = hibmc_ddc_setsda;
+       connector->bit_data.setscl = hibmc_ddc_setscl;
+       connector->bit_data.getsda = hibmc_ddc_getsda;
+       connector->bit_data.getscl = hibmc_ddc_getscl;
+
+       return i2c_bit_add_bus(&connector->adapter);
+}
index 376a05ddbc2f38041d16b7beec619c10795b648a..74e26c27d8785de74377d3572a625c914245589e 100644 (file)
 static int hibmc_connector_get_modes(struct drm_connector *connector)
 {
        int count;
+       void *edid;
+       struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+       edid = drm_get_edid(connector, &hibmc_connector->adapter);
+       if (edid) {
+               drm_connector_update_edid_property(connector, edid);
+               count = drm_add_edid_modes(connector, edid);
+               if (count)
+                       goto out;
+       }
 
        count = drm_add_modes_noedid(connector,
                                     connector->dev->mode_config.max_width,
                                     connector->dev->mode_config.max_height);
        drm_set_preferred_mode(connector, 1024, 768);
 
+out:
+       kfree(edid);
        return count;
 }
 
 static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
-                                     struct drm_display_mode *mode)
+                                                      struct drm_display_mode *mode)
 {
        return MODE_OK;
 }
 
+static void hibmc_connector_destroy(struct drm_connector *connector)
+{
+       struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+       i2c_del_adapter(&hibmc_connector->adapter);
+       drm_connector_cleanup(connector);
+}
+
 static const struct drm_connector_helper_funcs
        hibmc_connector_helper_funcs = {
        .get_modes = hibmc_connector_get_modes,
@@ -44,7 +64,7 @@ static const struct drm_connector_helper_funcs
 
 static const struct drm_connector_funcs hibmc_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = drm_connector_cleanup,
+       .destroy = hibmc_connector_destroy,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -56,7 +76,7 @@ static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
 {
        u32 reg;
        struct drm_device *dev = encoder->dev;
-       struct hibmc_drm_private *priv = dev->dev_private;
+       struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
 
        reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
        reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
@@ -77,10 +97,17 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = {
 int hibmc_vdac_init(struct hibmc_drm_private *priv)
 {
        struct drm_device *dev = priv->dev;
+       struct hibmc_connector *hibmc_connector = &priv->connector;
        struct drm_encoder *encoder = &priv->encoder;
-       struct drm_connector *connector = &priv->connector;
+       struct drm_connector *connector = &hibmc_connector->base;
        int ret;
 
+       ret = hibmc_ddc_create(dev, hibmc_connector);
+       if (ret) {
+               drm_err(dev, "failed to create ddc: %d\n", ret);
+               return ret;
+       }
+
        encoder->possible_crtcs = 0x1;
        ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
                               DRM_MODE_ENCODER_DAC, NULL);
@@ -91,12 +118,15 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
 
        drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
 
-       ret = drm_connector_init(dev, connector, &hibmc_connector_funcs,
-                                DRM_MODE_CONNECTOR_VGA);
+       ret = drm_connector_init_with_ddc(dev, connector,
+                                         &hibmc_connector_funcs,
+                                         DRM_MODE_CONNECTOR_VGA,
+                                         &hibmc_connector->adapter);
        if (ret) {
                drm_err(dev, "failed to init connector: %d\n", ret);
                return ret;
        }
+
        drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
 
        drm_connector_attach_encoder(connector, encoder);
index e1108c1735ad06f1d776f5f87915e176bf2797b3..cfe8ff596d550d52ad86ab52117af577720aff29 100644 (file)
@@ -436,7 +436,7 @@ static void ade_dump_regs(void __iomem *base) { }
 #endif
 
 static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
        struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
        struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
@@ -459,7 +459,7 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
        struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
index 8dd295dbe241689bf440ff17ea0b1b0fb0c801d2..0dd477e56573629801ffdc50be2c710b6c3e08dd 100644 (file)
@@ -77,14 +77,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
        i915_gem_object_unpin_pages(obj);
 }
 
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       void *vaddr;
 
-       return i915_gem_object_pin_map(obj, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       dma_buf_map_set_vaddr(map, vaddr);
+
+       return 0;
 }
 
-static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
index c8421fd9d2dcb2956b197f8b09fa62e8de5210d8..3389ac972d16356b5843a4be81f79b95c0a7f322 100644 (file)
@@ -39,9 +39,18 @@ static struct i915_global_object {
        struct kmem_cache *slab_objects;
 } global;
 
+static const struct drm_gem_object_funcs i915_gem_object_funcs;
+
 struct drm_i915_gem_object *i915_gem_object_alloc(void)
 {
-       return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+       struct drm_i915_gem_object *obj;
+
+       obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+       if (!obj)
+               return NULL;
+       obj->base.funcs = &i915_gem_object_funcs;
+
+       return obj;
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -101,7 +110,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
 }
 
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -264,7 +273,7 @@ static void __i915_gem_free_work(struct work_struct *work)
        i915_gem_flush_free_objects(i915);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -403,6 +412,12 @@ int __init i915_global_objects_init(void)
        return 0;
 }
 
+static const struct drm_gem_object_funcs i915_gem_object_funcs = {
+       .free = i915_gem_free_object,
+       .close = i915_gem_close_object,
+       .export = i915_gem_prime_export,
+};
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
 #include "selftests/huge_pages.c"
index d46db8d8f38e4e85154f85571ac732b656b50213..eaf3d4147be07d3538eddb8f465f9abe46882375 100644 (file)
@@ -38,9 +38,6 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
 
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
 struct sg_table *
index 0845ce1ae37c142902097e000fb0dfddd9256391..b6d43880b0c19afabbbc5b53c535f07608eb4984 100644 (file)
@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        void *obj_map, *dma_map;
+       struct dma_buf_map map;
        u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
        int err, i;
 
@@ -110,7 +111,8 @@ static int igt_dmabuf_import(void *arg)
                goto out_obj;
        }
 
-       dma_map = dma_buf_vmap(dmabuf);
+       err = dma_buf_vmap(dmabuf, &map);
+       dma_map = err ? NULL : map.vaddr;
        if (!dma_map) {
                pr_err("dma_buf_vmap failed\n");
                err = -ENOMEM;
@@ -150,7 +152,7 @@ static int igt_dmabuf_import(void *arg)
 
        err = 0;
 out_dma_map:
-       dma_buf_vunmap(dmabuf, dma_map);
+       dma_buf_vunmap(dmabuf, &map);
 out_obj:
        i915_gem_object_put(obj);
 out_dmabuf:
@@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg)
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
+       struct dma_buf_map map;
        void *ptr;
        int err;
 
@@ -170,7 +173,8 @@ static int igt_dmabuf_import_ownership(void *arg)
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
 
-       ptr = dma_buf_vmap(dmabuf);
+       err = dma_buf_vmap(dmabuf, &map);
+       ptr = err ? NULL : map.vaddr;
        if (!ptr) {
                pr_err("dma_buf_vmap failed\n");
                err = -ENOMEM;
@@ -178,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg)
        }
 
        memset(ptr, 0xc5, PAGE_SIZE);
-       dma_buf_vunmap(dmabuf, ptr);
+       dma_buf_vunmap(dmabuf, &map);
 
        obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
        if (IS_ERR(obj)) {
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
+       struct dma_buf_map map;
        void *ptr;
        int err;
 
@@ -228,7 +233,8 @@ static int igt_dmabuf_export_vmap(void *arg)
        }
        i915_gem_object_put(obj);
 
-       ptr = dma_buf_vmap(dmabuf);
+       err = dma_buf_vmap(dmabuf, &map);
+       ptr = err ? NULL : map.vaddr;
        if (!ptr) {
                pr_err("dma_buf_vmap failed\n");
                err = -ENOMEM;
@@ -244,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg)
        memset(ptr, 0xc5, dmabuf->size);
 
        err = 0;
-       dma_buf_vunmap(dmabuf, ptr);
+       dma_buf_vunmap(dmabuf, &map);
 out:
        dma_buf_put(dmabuf);
        return err;
index be30b27e2926b7aff09301ee7dc9fa2773ede9ca..2855d11c7a51c01c8e273479c87b9380c628069c 100644 (file)
@@ -61,18 +61,24 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf)
        kfree(mock);
 }
 
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct mock_dmabuf *mock = to_mock(dma_buf);
+       void *vaddr;
 
-       return vm_map_ram(mock->pages, mock->npages, 0);
+       vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+       if (!vaddr)
+               return -ENOMEM;
+       dma_buf_map_set_vaddr(map, vaddr);
+
+       return 0;
 }
 
-static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
        struct mock_dmabuf *mock = to_mock(dma_buf);
 
-       vm_unmap_ram(vaddr, mock->npages);
+       vm_unmap_ram(map->vaddr, mock->npages);
 }
 
 static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
index acc32066cec3572481c11732f291b92df5068a82..45e719c791832ba6e68887b30bc5aeee1bae608e 100644 (file)
@@ -1750,12 +1750,8 @@ static struct drm_driver driver = {
        .lastclose = i915_driver_lastclose,
        .postclose = i915_driver_postclose,
 
-       .gem_close_object = i915_gem_close_object,
-       .gem_free_object_unlocked = i915_gem_free_object,
-
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = i915_gem_prime_export,
        .gem_prime_import = i915_gem_prime_import,
 
        .dumb_create = i915_gem_dumb_create,
index b6c42fd872adecb2881e9a933e15dbd83b447962..9220c9d1a4b7c7c5d6f4a46c0f2f627dd9bcf8a1 100644 (file)
@@ -85,9 +85,6 @@ static struct drm_driver mock_driver = {
        .name = "mock",
        .driver_features = DRIVER_GEM,
        .release = mock_device_release,
-
-       .gem_close_object = i915_gem_close_object,
-       .gem_free_object_unlocked = i915_gem_free_object,
 };
 
 static void release_dev(struct device *dev)
index 36abff0890b2873343cd57fcfa14c97c521cc4bf..8f570eb5f471e3a99caa82985d774fac44d09f6f 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright 2019 NXP.
  */
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_vblank.h>
 #include <linux/platform_device.h>
@@ -77,8 +78,10 @@ static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_crtc_state)
+                                   struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
                                                   base);
        struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
@@ -111,8 +114,10 @@ static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_crtc_state)
+                                    struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
                                                   base);
        struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
index 135a62366ab8bcf003b8350b3052b8291953eef6..b72e5cef7e40b06767d142ee460ca1cc9ea7557a 100644 (file)
@@ -28,19 +28,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
 
 static struct drm_driver dcss_kms_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
-       .gem_free_object_unlocked = drm_gem_cma_free_object,
-       .gem_vm_ops             = &drm_gem_cma_vm_ops,
-       .dumb_create            = drm_gem_cma_dumb_create,
-
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
-       .gem_prime_import       = drm_gem_prime_import,
-       .gem_prime_export       = drm_gem_prime_export,
-       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
-       .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
-       .gem_prime_vmap         = drm_gem_cma_prime_vmap,
-       .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
-       .gem_prime_mmap         = drm_gem_cma_prime_mmap,
+       DRM_GEM_CMA_DRIVER_OPS,
        .fops                   = &dcss_cma_fops,
        .name                   = "imx-dcss",
        .desc                   = "i.MX8MQ Display Subsystem",
index 961d671f171b484cb37fa54f100d6a3d049caba5..e13652e3a115c87165d5b75023c5566d5aa550ab 100644 (file)
@@ -267,7 +267,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
        struct dcss_plane *dcss_plane = to_dcss_plane(plane);
        struct dcss_dev *dcss = plane->dev->dev_private;
        struct drm_framebuffer *fb = state->fb;
-       u32 pixel_format;
        struct drm_crtc_state *crtc_state;
        bool modifiers_present;
        u32 src_w, src_h, dst_w, dst_h;
@@ -277,7 +276,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
        if (!fb || !state->crtc || !state->visible)
                return;
 
-       pixel_format = state->fb->format->format;
        crtc_state = state->crtc->state;
        modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
 
index d412fc265395e4aa5e5b23a0180447fd4aff137f..7ecc27c41a6a8bf9facd05e95e774936556c91b8 100644 (file)
@@ -47,7 +47,7 @@ static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
 }
 
 static void ipu_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -79,8 +79,10 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_crtc_state)
+                                   struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
index a3d1617d7c67e0d45e564be8697cc3ffc647c339..2329754af1169e2fd8f35fcad6cfd24334976911 100644 (file)
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_color_mgmt.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
@@ -40,12 +43,21 @@ struct ingenic_dma_hwdesc {
        u32 addr;
        u32 id;
        u32 cmd;
-} __packed;
+} __aligned(16);
+
+struct ingenic_dma_hwdescs {
+       struct ingenic_dma_hwdesc hwdesc_f0;
+       struct ingenic_dma_hwdesc hwdesc_f1;
+       struct ingenic_dma_hwdesc hwdesc_pal;
+       u16 palette[256] __aligned(16);
+};
 
 struct jz_soc_info {
        bool needs_dev_clk;
        bool has_osd;
        unsigned int max_width, max_height;
+       const u32 *formats_f0, *formats_f1;
+       unsigned int num_formats_f0, num_formats_f1;
 };
 
 struct ingenic_drm {
@@ -63,17 +75,26 @@ struct ingenic_drm {
        struct clk *lcd_clk, *pix_clk;
        const struct jz_soc_info *soc_info;
 
-       struct ingenic_dma_hwdesc *dma_hwdesc_f0, *dma_hwdesc_f1;
-       dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
+       struct ingenic_dma_hwdescs *dma_hwdescs;
+       dma_addr_t dma_hwdescs_phys;
 
        bool panel_is_sharp;
        bool no_vblank;
-};
 
-static const u32 ingenic_drm_primary_formats[] = {
-       DRM_FORMAT_XRGB1555,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
+       /*
+        * clk_mutex is used to synchronize the pixel clock rate update with
+        * the VBLANK. When the pixel clock's parent clock needs to be updated,
+        * clock_nb's notifier function will lock the mutex, then wait until the
+        * next VBLANK. At that point, the parent clock's rate can be updated,
+        * and the mutex is then unlocked. If an atomic commit happens in the
+        * meantime, it will lock on the mutex, effectively waiting until the
+        * clock update process finishes. Finally, the pixel clock's rate will
+        * be recomputed when the mutex has been released, in the pending atomic
+        * commit, or a future one.
+        */
+       struct mutex clk_mutex;
+       bool update_clk_rate;
+       struct notifier_block clock_nb;
 };
 
 static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
@@ -111,8 +132,31 @@ static inline struct ingenic_drm *drm_crtc_get_priv(struct drm_crtc *crtc)
        return container_of(crtc, struct ingenic_drm, crtc);
 }
 
+static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
+{
+       return container_of(nb, struct ingenic_drm, clock_nb);
+}
+
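+/*
+ * Rate-change notifier for the pixel clock's parent (see the clk_mutex
+ * comment above): PRE_RATE_CHANGE takes the mutex and waits one VBLANK so
+ * scanout is quiescent across the rate switch; POST_RATE_CHANGE and
+ * ABORT_RATE_CHANGE both hit the default case and release the mutex.
+ */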
+static int ingenic_drm_update_pixclk(struct notifier_block *nb,
+                                    unsigned long action,
+                                    void *data)
+{
+       struct ingenic_drm *priv = drm_nb_get_priv(nb);
+
+       switch (action) {
+       case PRE_RATE_CHANGE:
+               mutex_lock(&priv->clk_mutex);
+               priv->update_clk_rate = true;
+               drm_crtc_wait_one_vblank(&priv->crtc);
+               return NOTIFY_OK;
+       default:
+               mutex_unlock(&priv->clk_mutex);
+               return NOTIFY_OK;
+       }
+}
+
 static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *state)
+                                          struct drm_atomic_state *state)
 {
        struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
 
@@ -126,7 +170,7 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                           struct drm_crtc_state *state)
+                                           struct drm_atomic_state *state)
 {
        struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
        unsigned int var;
@@ -200,6 +244,12 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
        struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
        struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
 
+       if (state->gamma_lut &&
+           drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
+               dev_dbg(priv->dev, "Invalid palette size\n");
+               return -EINVAL;
+       }
+
        if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
                f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
                if (IS_ERR(f1_state))
@@ -276,8 +326,14 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 
        if (drm_atomic_crtc_needs_modeset(state)) {
                ingenic_drm_crtc_update_timings(priv, &state->mode);
+               priv->update_clk_rate = true;
+       }
 
+       if (priv->update_clk_rate) {
+               mutex_lock(&priv->clk_mutex);
                clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+               priv->update_clk_rate = false;
+               mutex_unlock(&priv->clk_mutex);
        }
 
        if (event) {
@@ -398,24 +454,39 @@ void ingenic_drm_plane_config(struct device *dev,
                case DRM_FORMAT_RGB565:
                        ctrl |= JZ_LCD_OSDCTRL_BPP_15_16;
                        break;
+               case DRM_FORMAT_RGB888:
+                       ctrl |= JZ_LCD_OSDCTRL_BPP_24_COMP;
+                       break;
                case DRM_FORMAT_XRGB8888:
                        ctrl |= JZ_LCD_OSDCTRL_BPP_18_24;
                        break;
+               case DRM_FORMAT_XRGB2101010:
+                       ctrl |= JZ_LCD_OSDCTRL_BPP_30;
+                       break;
                }
 
                regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
                                   JZ_LCD_OSDCTRL_BPP_MASK, ctrl);
        } else {
                switch (fourcc) {
+               case DRM_FORMAT_C8:
+                       ctrl |= JZ_LCD_CTRL_BPP_8;
+                       break;
                case DRM_FORMAT_XRGB1555:
                        ctrl |= JZ_LCD_CTRL_RGB555;
                        fallthrough;
                case DRM_FORMAT_RGB565:
                        ctrl |= JZ_LCD_CTRL_BPP_15_16;
                        break;
+               case DRM_FORMAT_RGB888:
+                       ctrl |= JZ_LCD_CTRL_BPP_24_COMP;
+                       break;
                case DRM_FORMAT_XRGB8888:
                        ctrl |= JZ_LCD_CTRL_BPP_18_24;
                        break;
+               case DRM_FORMAT_XRGB2101010:
+                       ctrl |= JZ_LCD_CTRL_BPP_30;
+                       break;
                }
 
                regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
@@ -440,32 +511,64 @@ void ingenic_drm_plane_config(struct device *dev,
        }
 }
 
+static void ingenic_drm_update_palette(struct ingenic_drm *priv,
+                                      const struct drm_color_lut *lut)
+{
+       unsigned int i;
+
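+       /* Truncate each 16-bit LUT channel and pack the result as RGB565. */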
+       for (i = 0; i < ARRAY_SIZE(priv->dma_hwdescs->palette); i++) {
+               u16 color = drm_color_lut_extract(lut[i].red, 5) << 11
+                       | drm_color_lut_extract(lut[i].green, 6) << 5
+                       | drm_color_lut_extract(lut[i].blue, 5);
+
+               priv->dma_hwdescs->palette[i] = color;
+       }
+}
+
 static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
                                            struct drm_plane_state *oldstate)
 {
        struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
        struct drm_plane_state *state = plane->state;
+       struct drm_crtc_state *crtc_state;
        struct ingenic_dma_hwdesc *hwdesc;
-       unsigned int width, height, cpp;
+       unsigned int width, height, cpp, offset;
        dma_addr_t addr;
+       u32 fourcc;
 
        if (state && state->fb) {
+               crtc_state = state->crtc->state;
+
                addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
                width = state->src_w >> 16;
                height = state->src_h >> 16;
                cpp = state->fb->format->cpp[0];
 
                if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
-                       hwdesc = priv->dma_hwdesc_f0;
+                       hwdesc = &priv->dma_hwdescs->hwdesc_f0;
                else
-                       hwdesc = priv->dma_hwdesc_f1;
+                       hwdesc = &priv->dma_hwdescs->hwdesc_f1;
 
                hwdesc->addr = addr;
                hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
 
-               if (drm_atomic_crtc_needs_modeset(state->crtc->state))
-                       ingenic_drm_plane_config(priv->dev, plane,
-                                                state->fb->format->format);
+               if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+                       fourcc = state->fb->format->format;
+
+                       ingenic_drm_plane_config(priv->dev, plane, fourcc);
+
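+                       /*
+                        * For C8, route the descriptor chain through the
+                        * palette descriptor so the LUT is fetched along
+                        * with the frame; otherwise make f0 loop onto
+                        * itself.
+                        */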
+                       if (fourcc == DRM_FORMAT_C8)
+                               offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal);
+                       else
+                               offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+
+                       priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset;
+
+                       crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8;
+               }
+
+               if (crtc_state->gamma_lut && crtc_state->color_mgmt_changed)
+                       ingenic_drm_update_palette(priv, crtc_state->gamma_lut->data);
        }
 }
 
@@ -686,6 +789,11 @@ static void ingenic_drm_unbind_all(void *d)
        component_unbind_all(priv->dev, &priv->drm);
 }
 
+static void __maybe_unused ingenic_drm_release_rmem(void *d)
+{
+       of_reserved_mem_device_release(d);
+}
+
 static int ingenic_drm_bind(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -699,6 +807,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
        void __iomem *base;
        long parent_rate;
        unsigned int i, clone_mask = 0;
+       dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
        int ret, irq;
 
        soc_info = of_device_get_match_data(dev);
@@ -707,6 +816,19 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
                return -EINVAL;
        }
 
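+       /*
+        * A reserved-memory region is optional; -ENODEV simply means the
+        * devicetree does not declare one.
+        */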
+       if (IS_ENABLED(CONFIG_OF_RESERVED_MEM)) {
+               ret = of_reserved_mem_device_init(dev);
+
+               if (ret && ret != -ENODEV)
+                       dev_warn(dev, "Failed to get reserved memory: %d\n", ret);
+
+               if (!ret) {
+                       ret = devm_add_action_or_reset(dev, ingenic_drm_release_rmem, dev);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
                                  struct ingenic_drm, drm);
        if (IS_ERR(priv))
@@ -760,26 +882,34 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
                return PTR_ERR(priv->pix_clk);
        }
 
-       priv->dma_hwdesc_f1 = dmam_alloc_coherent(dev, sizeof(*priv->dma_hwdesc_f1),
-                                                 &priv->dma_hwdesc_phys_f1,
-                                                 GFP_KERNEL);
-       if (!priv->dma_hwdesc_f1)
+       priv->dma_hwdescs = dmam_alloc_coherent(dev,
+                                               sizeof(*priv->dma_hwdescs),
+                                               &priv->dma_hwdescs_phys,
+                                               GFP_KERNEL);
+       if (!priv->dma_hwdescs)
                return -ENOMEM;
 
-       priv->dma_hwdesc_f1->next = priv->dma_hwdesc_phys_f1;
-       priv->dma_hwdesc_f1->id = 0xf1;
 
-       if (priv->soc_info->has_osd) {
-               priv->dma_hwdesc_f0 = dmam_alloc_coherent(dev,
-                                                         sizeof(*priv->dma_hwdesc_f0),
-                                                         &priv->dma_hwdesc_phys_f0,
-                                                         GFP_KERNEL);
-               if (!priv->dma_hwdesc_f0)
-                       return -ENOMEM;
+       /* Configure DMA hwdesc for foreground0 plane */
+       dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys
+               + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+       priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0;
+       priv->dma_hwdescs->hwdesc_f0.id = 0xf0;
 
-               priv->dma_hwdesc_f0->next = priv->dma_hwdesc_phys_f0;
-               priv->dma_hwdesc_f0->id = 0xf0;
-       }
+       /* Configure DMA hwdesc for foreground1 plane */
+       dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys
+               + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1);
+       priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1;
+       priv->dma_hwdescs->hwdesc_f1.id = 0xf1;
+
+       /* Configure DMA hwdesc for palette */
+       priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys
+               + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+       priv->dma_hwdescs->hwdesc_pal.id = 0xc0;
+       priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys
+               + offsetof(struct ingenic_dma_hwdescs, palette);
+       priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL
+               | (sizeof(priv->dma_hwdescs->palette) / 4);
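+       /*
+        * hwdesc_pal chains back to hwdesc_f0; the plane update path points
+        * hwdesc_f0.next at hwdesc_pal while DRM_FORMAT_C8 is active, so
+        * the palette descriptor is fetched again on every frame.
+        */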
 
        if (soc_info->has_osd)
                priv->ipu_plane = drm_plane_from_index(drm, 0);
@@ -788,8 +918,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
 
        ret = drm_universal_plane_init(drm, &priv->f1, 1,
                                       &ingenic_drm_primary_plane_funcs,
-                                      ingenic_drm_primary_formats,
-                                      ARRAY_SIZE(ingenic_drm_primary_formats),
+                                      priv->soc_info->formats_f1,
+                                      priv->soc_info->num_formats_f1,
                                       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret) {
                dev_err(dev, "Failed to register plane: %i\n", ret);
@@ -805,14 +935,17 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
                return ret;
        }
 
+       drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
+                                  ARRAY_SIZE(priv->dma_hwdescs->palette));
+
        if (soc_info->has_osd) {
                drm_plane_helper_add(&priv->f0,
                                     &ingenic_drm_plane_helper_funcs);
 
                ret = drm_universal_plane_init(drm, &priv->f0, 1,
                                               &ingenic_drm_primary_plane_funcs,
-                                              ingenic_drm_primary_formats,
-                                              ARRAY_SIZE(ingenic_drm_primary_formats),
+                                              priv->soc_info->formats_f0,
+                                              priv->soc_info->num_formats_f0,
                                               NULL, DRM_PLANE_TYPE_OVERLAY,
                                               NULL);
                if (ret) {
@@ -927,23 +1060,35 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
        }
 
        /* Set address of our DMA descriptor chain */
-       regmap_write(priv->map, JZ_REG_LCD_DA0, priv->dma_hwdesc_phys_f0);
-       regmap_write(priv->map, JZ_REG_LCD_DA1, priv->dma_hwdesc_phys_f1);
+       regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0);
+       regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1);
 
        /* Enable OSD if available */
        if (soc_info->has_osd)
                regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
 
+       mutex_init(&priv->clk_mutex);
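+       /*
+        * ingenic_drm_update_pixclk() is the notifier callback (not visible
+        * in this hunk); it is assumed to flag update_clk_rate so that the
+        * next atomic flush re-applies the pixel clock after a parent rate
+        * change.
+        */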
+       priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
+
+       parent_clk = clk_get_parent(priv->pix_clk);
+       ret = clk_notifier_register(parent_clk, &priv->clock_nb);
+       if (ret) {
+               dev_err(dev, "Unable to register clock notifier\n");
+               goto err_devclk_disable;
+       }
+
        ret = drm_dev_register(drm, 0);
        if (ret) {
                dev_err(dev, "Failed to register DRM driver\n");
-               goto err_devclk_disable;
+               goto err_clk_notifier_unregister;
        }
 
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
+err_clk_notifier_unregister:
+       clk_notifier_unregister(parent_clk, &priv->clock_nb);
 err_devclk_disable:
        if (priv->lcd_clk)
                clk_disable_unprepare(priv->lcd_clk);
@@ -965,7 +1110,9 @@ static int compare_of(struct device *dev, void *data)
 static void ingenic_drm_unbind(struct device *dev)
 {
        struct ingenic_drm *priv = dev_get_drvdata(dev);
+       struct clk *parent_clk = clk_get_parent(priv->pix_clk);
 
+       clk_notifier_unregister(parent_clk, &priv->clock_nb);
        if (priv->lcd_clk)
                clk_disable_unprepare(priv->lcd_clk);
        clk_disable_unprepare(priv->pix_clk);
@@ -1011,11 +1158,50 @@ static int ingenic_drm_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const u32 jz4740_formats[] = {
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f1[] = {
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f0[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4770_formats_f1[] = {
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XRGB2101010,
+};
+
+static const u32 jz4770_formats_f0[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XRGB2101010,
+};
+
 static const struct jz_soc_info jz4740_soc_info = {
        .needs_dev_clk = true,
        .has_osd = false,
        .max_width = 800,
        .max_height = 600,
+       .formats_f1 = jz4740_formats,
+       .num_formats_f1 = ARRAY_SIZE(jz4740_formats),
+       /* JZ4740 has only one plane */
 };
 
 static const struct jz_soc_info jz4725b_soc_info = {
@@ -1023,6 +1209,10 @@ static const struct jz_soc_info jz4725b_soc_info = {
        .has_osd = true,
        .max_width = 800,
        .max_height = 600,
+       .formats_f1 = jz4725b_formats_f1,
+       .num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1),
+       .formats_f0 = jz4725b_formats_f0,
+       .num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
 };
 
 static const struct jz_soc_info jz4770_soc_info = {
@@ -1030,6 +1220,10 @@ static const struct jz_soc_info jz4770_soc_info = {
        .has_osd = true,
        .max_width = 1280,
        .max_height = 720,
+       .formats_f1 = jz4770_formats_f1,
+       .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+       .formats_f0 = jz4770_formats_f0,
+       .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
 };
 
 static const struct of_device_id ingenic_drm_of_match[] = {
index 43f7d959cff7e788569a120dc357ec6bfcce88bf..9b48ce02803dd76e395831a80b914b066c88be30 100644 (file)
 #define JZ_LCD_CTRL_BPP_8                      0x3
 #define JZ_LCD_CTRL_BPP_15_16                  0x4
 #define JZ_LCD_CTRL_BPP_18_24                  0x5
+#define JZ_LCD_CTRL_BPP_24_COMP                        0x6
+#define JZ_LCD_CTRL_BPP_30                     0x7
 #define JZ_LCD_CTRL_BPP_MASK                   (JZ_LCD_CTRL_RGB555 | 0x7)
 
 #define JZ_LCD_CMD_SOF_IRQ                     BIT(31)
 #define JZ_LCD_OSDCTRL_CHANGE                  BIT(3)
 #define JZ_LCD_OSDCTRL_BPP_15_16               0x4
 #define JZ_LCD_OSDCTRL_BPP_18_24               0x5
+#define JZ_LCD_OSDCTRL_BPP_24_COMP             0x6
 #define JZ_LCD_OSDCTRL_BPP_30                  0x7
 #define JZ_LCD_OSDCTRL_BPP_MASK                        (JZ_LCD_OSDCTRL_RGB555 | 0x7)
 
index c592957ed07fc7c247db06929dd8768f11a8fa06..f9b5f450a9cbbfbb6870c8edd8aa3d828f6626fc 100644 (file)
@@ -331,8 +331,8 @@ static int mcde_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
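+       /* platform_get_irq() signals failure with a negative errno */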
-       if (!irq) {
-               ret = -EINVAL;
+       if (irq < 0) {
+               ret = irq;
                goto clk_disable;
        }
 
index ac038572164d3c961413af59dd35658a31c67a57..c28f5d7aac1a7a5e82919c07ede4cd68792f9cc3 100644 (file)
@@ -517,7 +517,7 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
 }
 
 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
@@ -542,7 +542,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
index 59c85c63b7cc900f4b3d22370cbac6e07f29031c..7f3398a7c2b0f392085f4c315bd330c0ba2ad9c5 100644 (file)
@@ -324,18 +324,13 @@ struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
 static struct drm_driver mtk_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
 
-       .gem_free_object_unlocked = mtk_drm_gem_free_object,
-       .gem_vm_ops = &drm_gem_cma_vm_ops,
        .dumb_create = mtk_drm_gem_dumb_create,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = mtk_drm_gem_prime_import,
-       .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
-       .gem_prime_vmap = mtk_drm_gem_prime_vmap,
-       .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
        .fops = &mtk_drm_fops,
 
        .name = DRIVER_NAME,
index 0583e557ad372b5c3dcc8e830d9c13b6d5413530..cdd1a6e61564101a95b1fdefab4e50c0edfa6edd 100644 (file)
@@ -8,11 +8,20 @@
 #include <drm/drm.h>
 #include <drm/drm_device.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_prime.h>
 
 #include "mtk_drm_drv.h"
 #include "mtk_drm_gem.h"
 
+static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
+       .free = mtk_drm_gem_free_object,
+       .get_sg_table = mtk_gem_prime_get_sg_table,
+       .vmap = mtk_drm_gem_prime_vmap,
+       .vunmap = mtk_drm_gem_prime_vunmap,
+       .vm_ops = &drm_gem_cma_vm_ops,
+};
+
 static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
 {
@@ -25,6 +34,8 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
        if (!mtk_gem_obj)
                return ERR_PTR(-ENOMEM);
 
+       mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
+
        ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
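
The hunk above shows a pattern that recurs across drivers in this merge:
GEM callbacks move out of struct drm_driver into a per-object struct
drm_gem_object_funcs assigned when the object is created. A minimal
sketch of the shape, with illustrative names rather than code from this
series:

	static const struct drm_gem_object_funcs foo_gem_object_funcs = {
		.free = foo_gem_free_object,
		.get_sg_table = foo_gem_prime_get_sg_table,
		.vm_ops = &drm_gem_cma_vm_ops,
	};

	/* in the object constructor, before the object is first used */
	obj->funcs = &foo_gem_object_funcs;
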
index 2854272dc2d9bdb4a1648a89f9a3e6758a9cc2ed..247ce085886bf066c9ad0edc935b196164ca6aa3 100644 (file)
@@ -82,7 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
 };
 
 static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
-                                         struct drm_crtc_state *old_state)
+                                         struct drm_atomic_state *state)
 {
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct drm_crtc_state *crtc_state = crtc->state;
@@ -118,7 +118,7 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct drm_crtc_state *crtc_state = crtc->state;
@@ -146,7 +146,7 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct meson_drm *priv = meson_crtc->priv;
@@ -171,7 +171,7 @@ static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct meson_drm *priv = meson_crtc->priv;
index f56414a06ec416c74cdcfccfa34e817bf1def64f..6a24ce245a373606c9ab5b78b220c43924e6f3f1 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/ktime.h>
 #include <linux/bits.h>
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_flip_work.h>
 #include <drm/drm_mode.h>
@@ -706,8 +707,10 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 }
 
 static void dpu_crtc_disable(struct drm_crtc *crtc,
-                            struct drm_crtc_state *old_crtc_state)
+                            struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
@@ -770,7 +773,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 }
 
 static void dpu_crtc_enable(struct drm_crtc *crtc,
-               struct drm_crtc_state *old_crtc_state)
+               struct drm_atomic_state *state)
 {
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct drm_encoder *encoder;
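
The dpu hunks above also illustrate the tree-wide prototype change in
this merge: the drm_crtc_helper_funcs atomic_enable() and
atomic_disable() hooks now take the full struct drm_atomic_state rather
than the old CRTC state. A helper that still needs the old state can
look it up, roughly as follows (foo_crtc_atomic_disable is an
illustrative name):

	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
	{
		struct drm_crtc_state *old_crtc_state =
			drm_atomic_get_old_crtc_state(state, crtc);

		/* use old_crtc_state exactly as before */
	}
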
index a0253297bc76953279defca6b4ac690659b8f6a0..6b03ceeb5ba173e43abd4965a24cd261342a33ef 100644 (file)
@@ -264,7 +264,7 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct mdp4_kms *mdp4_kms = get_kms(crtc);
@@ -284,7 +284,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct mdp4_kms *mdp4_kms = get_kms(crtc);
index c39dad151bb6df04a7ac6e6d1639aed3d4b7e483..747dd8a7aa6e4dd6330a5d7fc3c9252f50b752e2 100644 (file)
@@ -483,7 +483,7 @@ static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
 }
 
 static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
@@ -529,7 +529,7 @@ static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
 }
 
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
index 49685571dc0eebbabcdfc551a118070781890663..aa4509766d6450364b135ed9c533a76a174dadf4 100644 (file)
@@ -972,12 +972,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
 };
 
-static const struct vm_operations_struct vm_ops = {
-       .fault = msm_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
@@ -1003,18 +997,11 @@ static struct drm_driver msm_driver = {
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
-       .gem_free_object_unlocked = msm_gem_free_object,
-       .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_pin      = msm_gem_prime_pin,
-       .gem_prime_unpin    = msm_gem_prime_unpin,
-       .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-       .gem_prime_vmap     = msm_gem_prime_vmap,
-       .gem_prime_vunmap   = msm_gem_prime_vunmap,
        .gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
index b9dd8f8f4887243ce6669d3015d2f8cc7b2b3da3..c45789f36e48721a8c0eccb5293ef6cd3ec6ebe2 100644 (file)
@@ -276,7 +276,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
                        struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
index 04be4cfcccc1851f0349b0d1eebe9a30dde3d870..2e1bce7c0b1975718d5e325038850f2f325d7cdd 100644 (file)
@@ -236,7 +236,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@ -1000,6 +1000,22 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        return ret;
 }
 
+static const struct vm_operations_struct vm_ops = {
+       .fault = msm_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+       .free = msm_gem_free_object,
+       .pin = msm_gem_prime_pin,
+       .unpin = msm_gem_prime_unpin,
+       .get_sg_table = msm_gem_prime_get_sg_table,
+       .vmap = msm_gem_prime_vmap,
+       .vunmap = msm_gem_prime_vunmap,
+       .vm_ops = &vm_ops,
+};
+
 static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct drm_gem_object **obj)
@@ -1030,6 +1046,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
        INIT_LIST_HEAD(&msm_obj->vmas);
 
        *obj = &msm_obj->base;
+       (*obj)->funcs = &msm_gem_object_funcs;
 
        return 0;
 }
index b721b8b262ce97346d0f6125ccd3f4fe54f54b78..956f631997f2b60c42d75f698db861929c834175 100644 (file)
@@ -302,7 +302,7 @@ static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
        struct drm_device *drm = mxsfb->drm;
@@ -326,7 +326,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
        struct drm_device *drm = mxsfb->drm;
index 2ee75646ad6fcb44e33435981d92e2147885d9b8..8133377d865d353f8779764a66eb7306dbf5071e 100644 (file)
@@ -28,7 +28,6 @@
  */
 
 #include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
 
 #include "nouveau_drv.h"
 #include "nouveau_chan.h"
@@ -46,6 +45,7 @@
 
 static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
                               struct ttm_resource *reg);
+static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 /*
  * NV10-NV40 tiling helpers
@@ -139,7 +139,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       WARN_ON(nvbo->pin_refcnt > 0);
+       WARN_ON(nvbo->bo.pin_count > 0);
        nouveau_bo_del_io_reserve_lru(bo);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
@@ -343,37 +343,23 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 }
 
 static void
-set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
-                  uint32_t domain, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
 {
        *n = 0;
 
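+       /*
+        * Placement flags no longer carry caching or no-evict bits: caching
+        * is now a property of the ttm_tt and pinning is tracked by TTM
+        * itself.
+        */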
        if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
-               struct nvif_mmu *mmu = &drm->client.mmu;
-               const u8 type = mmu->type[drm->ttm.type_vram].type;
-
                pl[*n].mem_type = TTM_PL_VRAM;
-               pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
-
-               /* Some BARs do not support being ioremapped WC */
-               if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
-                   type & NVIF_MEM_UNCACHED)
-                       pl[*n].flags &= ~TTM_PL_FLAG_WC;
-
+               pl[*n].flags = 0;
                (*n)++;
        }
        if (domain & NOUVEAU_GEM_DOMAIN_GART) {
                pl[*n].mem_type = TTM_PL_TT;
-               pl[*n].flags = flags;
-
-               if (drm->agp.bridge)
-                       pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
-
+               pl[*n].flags = 0;
                (*n)++;
        }
        if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
                pl[*n].mem_type = TTM_PL_SYSTEM;
-               pl[(*n)++].flags = flags;
+               pl[(*n)++].flags = 0;
        }
 }
 
@@ -415,19 +401,14 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
                         uint32_t busy)
 {
-       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_placement *pl = &nvbo->placement;
-       uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
-                                                TTM_PL_MASK_CACHING) |
-                        (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
        pl->placement = nvbo->placements;
-       set_placement_list(drm, nvbo->placements, &pl->num_placement,
-                          domain, flags);
+       set_placement_list(nvbo->placements, &pl->num_placement, domain);
 
        pl->busy_placement = nvbo->busy_placements;
-       set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
-                          domain | busy, flags);
+       set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+                          domain | busy);
 
        set_placement_range(nvbo, domain);
 }
@@ -453,7 +434,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
                }
        }
 
-       if (nvbo->pin_refcnt) {
+       if (nvbo->bo.pin_count) {
                bool error = evict;
 
                switch (bo->mem.mem_type) {
@@ -472,7 +453,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
                                 bo->mem.mem_type, domain);
                        ret = -EBUSY;
                }
-               nvbo->pin_refcnt++;
+               ttm_bo_pin(&nvbo->bo);
                goto out;
        }
 
@@ -483,18 +464,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
                        goto out;
        }
 
-       nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, domain, 0);
-
-       /* drop pin_refcnt temporarily, so we don't trip the assertion
-        * in nouveau_bo_move() that makes sure we're not trying to
-        * move a pinned buffer
-        */
-       nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret)
                goto out;
-       nvbo->pin_refcnt++;
+
+       ttm_bo_pin(&nvbo->bo);
 
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
@@ -519,30 +494,14 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
-       int ret, ref;
+       int ret;
 
        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;
 
-       ref = --nvbo->pin_refcnt;
-       WARN_ON_ONCE(ref < 0);
-       if (ref)
-               goto out;
-
-       switch (bo->mem.mem_type) {
-       case TTM_PL_VRAM:
-               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
-               break;
-       case TTM_PL_TT:
-               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
-               break;
-       default:
-               break;
-       }
-
-       ret = nouveau_bo_validate(nvbo, false, false);
-       if (ret == 0) {
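+       /*
+        * Pin accounting now lives in TTM's pin_count rather than the
+        * driver-private pin_refcnt; only return the memory to the
+        * statistics once the last pin reference is dropped.
+        */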
+       ttm_bo_unpin(&nvbo->bo);
+       if (!nvbo->bo.pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
@@ -555,9 +514,8 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
                }
        }
 
-out:
        ttm_bo_unreserve(bo);
-       return ret;
+       return 0;
 }
 
 int
@@ -588,7 +546,7 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i;
 
        if (!ttm_dma)
@@ -598,7 +556,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; i++)
                dma_sync_single_for_device(drm->dev->dev,
                                           ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
@@ -608,7 +566,7 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i;
 
        if (!ttm_dma)
@@ -618,7 +576,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; i++)
                dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
 }
@@ -796,8 +754,9 @@ done:
 }
 
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+                    struct ttm_operation_ctx *ctx,
+                    struct ttm_resource *new_reg)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
@@ -816,7 +775,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
        }
 
        mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
-       ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
+       ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
        if (ret == 0) {
                ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
                if (ret == 0) {
@@ -903,15 +862,15 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 }
 
 static int
-nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict,
+                     struct ttm_operation_ctx *ctx,
+                     struct ttm_resource *new_reg)
 {
-       struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_TT,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
        struct ttm_placement placement;
        struct ttm_resource tmp_reg;
@@ -922,11 +881,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_reg = *new_reg;
        tmp_reg.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
        if (ret)
                return ret;
 
-       ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+       ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
        if (ret)
                goto out;
 
@@ -934,26 +893,32 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
+       ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg);
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
+       ret = ttm_bo_wait_ctx(bo, ctx);
+       if (ret)
+               goto out;
+
+       nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+       ttm_resource_free(bo, &bo->mem);
+       ttm_bo_assign_mem(bo, &tmp_reg);
 out:
        ttm_resource_free(bo, &tmp_reg);
        return ret;
 }
 
 static int
-nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict,
+                     struct ttm_operation_ctx *ctx,
+                     struct ttm_resource *new_reg)
 {
-       struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_TT,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
        struct ttm_placement placement;
        struct ttm_resource tmp_reg;
@@ -964,15 +929,20 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_reg = *new_reg;
        tmp_reg.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
-       if (ret)
-               goto out;
+       ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+       if (unlikely(ret != 0))
+               return ret;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
+       ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ttm_bo_assign_mem(bo, &tmp_reg);
+       ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg);
        if (ret)
                goto out;
 
@@ -1061,17 +1031,24 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;
 
-       ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
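+       /*
+        * With the ttm_tt rework the driver binds the TT backend itself
+        * before moving, instead of TTM doing it through a callback.
+        */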
+       if (new_reg->mem_type == TTM_PL_TT) {
+               ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
+               if (ret)
+                       return ret;
+       }
+
+       nouveau_bo_move_ntfy(bo, evict, new_reg);
+       ret = ttm_bo_wait_ctx(bo, ctx);
        if (ret)
-               return ret;
+               goto out_ntfy;
 
-       if (nvbo->pin_refcnt)
+       if (nvbo->bo.pin_count)
                NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
 
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
                if (ret)
-                       return ret;
+                       goto out_ntfy;
        }
 
        /* Fake bo copy. */
@@ -1080,28 +1057,37 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
 
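+       /*
+        * SYSTEM <-> TT transitions copy no data: binding or unbinding the
+        * TT backend is enough, so handle them without a blit.
+        */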
+       if (old_reg->mem_type == TTM_PL_SYSTEM &&
+           new_reg->mem_type == TTM_PL_TT) {
+               ttm_bo_move_null(bo, new_reg);
+               goto out;
+       }
+
+       if (old_reg->mem_type == TTM_PL_TT &&
+           new_reg->mem_type == TTM_PL_SYSTEM) {
+               nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_reg);
+               goto out;
+       }
+
        /* Hardware assisted copy. */
        if (drm->ttm.move) {
                if (new_reg->mem_type == TTM_PL_SYSTEM)
-                       ret = nouveau_bo_move_flipd(bo, evict,
-                                                   ctx->interruptible,
-                                                   ctx->no_wait_gpu, new_reg);
+                       ret = nouveau_bo_move_flipd(bo, evict, ctx,
+                                                   new_reg);
                else if (old_reg->mem_type == TTM_PL_SYSTEM)
-                       ret = nouveau_bo_move_flips(bo, evict,
-                                                   ctx->interruptible,
-                                                   ctx->no_wait_gpu, new_reg);
+                       ret = nouveau_bo_move_flips(bo, evict, ctx,
+                                                   new_reg);
                else
-                       ret = nouveau_bo_move_m2mf(bo, evict,
-                                                  ctx->interruptible,
-                                                  ctx->no_wait_gpu, new_reg);
+                       ret = nouveau_bo_move_m2mf(bo, evict, ctx,
+                                                  new_reg);
                if (!ret)
                        goto out;
        }
 
        /* Fallback to software copy. */
-       ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-       if (ret == 0)
-               ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+       ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
 
 out:
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1110,7 +1096,12 @@ out:
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }
-
+out_ntfy:
+       if (ret) {
+               swap(*new_reg, bo->mem);
+               nouveau_bo_move_ntfy(bo, false, new_reg);
+               swap(*new_reg, bo->mem);
+       }
        return ret;
 }
 
@@ -1150,6 +1141,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nouveau_mem *mem = nouveau_mem(reg);
+       struct nvif_mmu *mmu = &drm->client.mmu;
+       const u8 type = mmu->type[drm->ttm.type_vram].type;
        int ret;
 
        mutex_lock(&drm->ttm.io_reserve_mutex);
@@ -1165,6 +1158,7 @@ retry:
                        reg->bus.offset = (reg->start << PAGE_SHIFT) +
                                drm->agp.base;
                        reg->bus.is_iomem = !drm->agp.cma;
+                       reg->bus.caching = ttm_write_combined;
                }
 #endif
                if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
@@ -1178,6 +1172,14 @@ retry:
                reg->bus.offset = (reg->start << PAGE_SHIFT) +
                        device->func->resource_addr(device, 1);
                reg->bus.is_iomem = true;
+
+               /* Some BARs do not support being ioremapped WC */
+               if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+                   type & NVIF_MEM_UNCACHED)
+                       reg->bus.caching = ttm_uncached;
+               else
+                       reg->bus.caching = ttm_write_combined;
+
                if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
                        union {
                                struct nv50_mem_map_v0 nv50;
@@ -1252,8 +1254,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
        mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1269,41 +1270,45 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
                    !nvbo->kind)
                        return 0;
 
-               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-                       nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
-                                                0);
+               if (bo->mem.mem_type != TTM_PL_SYSTEM)
+                       return 0;
+
+               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+
+       } else {
+               /* make sure bo is in mappable vram */
+               if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+                   bo->mem.start + bo->mem.num_pages < mappable)
+                       return 0;
 
-                       ret = nouveau_bo_validate(nvbo, false, false);
-                       if (ret)
-                               return ret;
+               for (i = 0; i < nvbo->placement.num_placement; ++i) {
+                       nvbo->placements[i].fpfn = 0;
+                       nvbo->placements[i].lpfn = mappable;
                }
-               return 0;
-       }
 
-       /* make sure bo is in mappable vram */
-       if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-           bo->mem.start + bo->mem.num_pages < mappable)
-               return 0;
+               for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+                       nvbo->busy_placements[i].fpfn = 0;
+                       nvbo->busy_placements[i].lpfn = mappable;
+               }
 
-       for (i = 0; i < nvbo->placement.num_placement; ++i) {
-               nvbo->placements[i].fpfn = 0;
-               nvbo->placements[i].lpfn = mappable;
+               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
        }
 
-       for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-               nvbo->busy_placements[i].fpfn = 0;
-               nvbo->busy_placements[i].lpfn = mappable;
-       }
+       ret = nouveau_bo_validate(nvbo, false, false);
+       if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+               return VM_FAULT_NOPAGE;
+       else if (unlikely(ret))
+               return VM_FAULT_SIGBUS;
 
-       nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
-       return nouveau_bo_validate(nvbo, false, false);
+       ttm_bo_move_to_lru_tail_unlocked(bo);
+       return 0;
 }
 
 static int
 nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
                        struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct ttm_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1315,32 +1320,19 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
-               ttm_tt_set_populated(ttm);
                return 0;
        }
 
        drm = nouveau_bdev(bdev);
        dev = drm->dev->dev;
 
-#if IS_ENABLED(CONFIG_AGP)
-       if (drm->agp.bridge) {
-               return ttm_pool_populate(ttm, ctx);
-       }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-       if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate((void *)ttm, dev, ctx);
-       }
-#endif
-       return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
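+       /*
+        * The AGP and swiotlb special cases are gone; the common TTM pool
+        * now takes care of DMA mapping and caching attributes.
+        */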
+       return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
 }
 
 static void
 nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
                          struct ttm_tt *ttm)
 {
-       struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1351,21 +1343,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
        drm = nouveau_bdev(bdev);
        dev = drm->dev->dev;
 
-#if IS_ENABLED(CONFIG_AGP)
-       if (drm->agp.bridge) {
-               ttm_pool_unpopulate(ttm);
-               return;
-       }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-       if (swiotlb_nr_tbl()) {
-               ttm_dma_unpopulate((void *)ttm, dev);
-               return;
-       }
-#endif
-
-       ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+       ttm_pool_free(&drm->ttm.bdev.pool, ttm);
 }
 
 static void
@@ -1395,19 +1373,22 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
                dma_resv_add_shared_fence(resv, &fence->base);
 }
 
+static void
+nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+       nouveau_bo_move_ntfy(bo, false, NULL);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
-       .ttm_tt_bind = &nouveau_ttm_tt_bind,
-       .ttm_tt_unbind = &nouveau_ttm_tt_unbind,
        .ttm_tt_destroy = &nouveau_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = nouveau_bo_evict_flags,
-       .move_notify = nouveau_bo_move_ntfy,
+       .delete_mem_notify = nouveau_bo_delete_mem_notify,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
-       .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
 };
index 2a23c820743625f940b91a0eae2e136b75a40480..641ef6298a0ecdfb1e0abb2706bcdcea9a64e0b9 100644 (file)
@@ -40,9 +40,6 @@ struct nouveau_bo {
 
        struct nouveau_drm_tile *tile;
 
-       /* protect by the ttm reservation lock */
-       int pin_refcnt;
-
        struct ttm_bo_kmap_obj dma_buf_vmap;
 };
 
@@ -92,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
 void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
 u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
                         bool no_wait_gpu);
index 8f099601d2f2d828d2b75a660f68d76328672753..5d191e58edf113bf83335c1741ca678fe9f87ed7 100644 (file)
@@ -107,7 +107,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                nvif_object_dtor(&chan->push.ctxdma);
                nouveau_vma_del(&chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
-               if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+               if (chan->push.buffer && chan->push.buffer->bo.pin_count)
                        nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
index 42fc5c813a9bbfd7195514af99eb20b6456854c0..d141a5f004afc9f729d2defef3739641b98f60b9 100644 (file)
@@ -820,6 +820,7 @@ static int
 nouveau_do_suspend(struct drm_device *dev, bool runtime)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct ttm_resource_manager *man;
        int ret;
 
        nouveau_svm_suspend(drm);
@@ -836,7 +837,9 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
        }
 
        NV_DEBUG(drm, "evicting buffers...\n");
-       ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+       man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+       ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
 
        NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
        if (drm->cechan) {
@@ -1207,16 +1210,7 @@ driver_stub = {
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_pin = nouveau_gem_prime_pin,
-       .gem_prime_unpin = nouveau_gem_prime_unpin,
-       .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
-       .gem_prime_vmap = nouveau_gem_prime_vmap,
-       .gem_prime_vunmap = nouveau_gem_prime_vunmap,
-
-       .gem_free_object_unlocked = nouveau_gem_object_del,
-       .gem_open_object = nouveau_gem_object_open,
-       .gem_close_object = nouveau_gem_object_close,
 
        .dumb_create = nouveau_display_dumb_create,
        .dumb_map_offset = nouveau_display_dumb_map_offset,
index b8025507a9e4c9de9331979cf7f8a7a7bbed675a..9d04d1b364343538e4b7a97f6f084ea72aedcef1 100644 (file)
@@ -56,7 +56,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 #include <drm/drm_audio_component.h>
 
index 549bc67feabb4b645b6b1a9b2eecf46c2b435014..dd51cd0ae20cbbcde9667be98c803cf5a4ae73ca 100644 (file)
@@ -169,6 +169,17 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
        ttm_bo_unreserve(&nvbo->bo);
 }
 
+const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
+       .free = nouveau_gem_object_del,
+       .open = nouveau_gem_object_open,
+       .close = nouveau_gem_object_close,
+       .pin = nouveau_gem_prime_pin,
+       .unpin = nouveau_gem_prime_unpin,
+       .get_sg_table = nouveau_gem_prime_get_sg_table,
+       .vmap = nouveau_gem_prime_vmap,
+       .vunmap = nouveau_gem_prime_vunmap,
+};
+
 int
 nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
@@ -186,6 +197,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);
 
+       nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
@@ -210,7 +223,6 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;
 
-       nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
        *pnvbo = nvbo;
        return 0;
 }
index 978e07591990bd4a17f12cfdd0a28498aee2c5ea..b35c180322e21be9c4110c88e8c3a2dd069147cc 100644 (file)
@@ -5,6 +5,8 @@
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
 
+extern const struct drm_gem_object_funcs nouveau_gem_object_funcs;
+
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
index 9dfcce1b984695d36c9965ba8a98ef94fbb6e0f5..a1049e9feee1283bca3d2f601e4317ebe8df2654 100644 (file)
@@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
 }
 
 int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 {
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
@@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
                mem->comp = 0;
        }
 
-       if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
-       else            args.dma = tt->dma_address;
+       if (tt->sg)
+               args.sgl = tt->sg->sgl;
+       else
+               args.dma = tt->dma_address;
 
        mutex_lock(&drm->master.lock);
        cli->base.super = true;
index 3fe1cfed57a11c5809cd0ce1bc90ddf3ee16afd1..7df3848e85aaeb661defc24f66334780b4a276f7 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __NOUVEAU_MEM_H__
 #define __NOUVEAU_MEM_H__
 #include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
 
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
@@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
                    struct ttm_resource *);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
 #endif
index b2ecb91f8ddc0357e74e6dbec2c9aeed8eae1abe..a8264aebf3d47d9cf91e0073129d5ae20c12eeb0 100644 (file)
@@ -77,6 +77,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 
+       nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
index 806d9ec310f55a14c82454de6b436dad24c278c0..a2e23fd4906ce58edaee011af20f197f8cdae461 100644 (file)
@@ -5,12 +5,13 @@
 #include "nouveau_drv.h"
 #include "nouveau_mem.h"
 #include "nouveau_ttm.h"
+#include "nouveau_bo.h"
 
 struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c works properly, otherwise have to move them here
         */
-       struct ttm_dma_tt ttm;
+       struct ttm_tt ttm;
        struct nouveau_mem *mem;
 };
 
@@ -22,7 +23,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
        if (ttm) {
                nouveau_sgdma_unbind(bdev, ttm);
                ttm_tt_destroy_common(bdev, ttm);
-               ttm_dma_tt_fini(&nvbe->ttm);
+               ttm_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
 }
@@ -67,15 +68,25 @@ nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 struct ttm_tt *
 nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 {
+       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_sgdma_be *nvbe;
+       enum ttm_caching caching;
+
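+       /*
+        * Caching is now a property of the ttm_tt itself rather than a TTM
+        * placement flag, so choose it when the backend is created.
+        */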
+       if (nvbo->force_coherent)
+               caching = ttm_uncached;
+       else if (drm->agp.bridge)
+               caching = ttm_write_combined;
+       else
+               caching = ttm_cached;
 
        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;
 
-       if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+       if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
                kfree(nvbe);
                return NULL;
        }
-       return &nvbe->ttm.ttm;
+       return &nvbe->ttm;
 }
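
The ttm_dma_tt removal above repeats the same shape in every driver touched by this merge: the DMA bookkeeping (sg, dma_address) now lives directly in struct ttm_tt, the backend embeds a ttm_tt instead of the old wrapper, and the caching mode is picked once and handed to the init helper. A condensed sketch of the resulting backend, assuming only the signatures visible in the hunks above (the my_* names are hypothetical):

        /* Backend layout after the conversion; no ttm_dma_tt wrapper. */
        struct my_sgdma_be {
                struct ttm_tt ttm;      /* must stay the first field */
        };

        static struct ttm_tt *
        my_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
        {
                struct my_sgdma_be *be = kzalloc(sizeof(*be), GFP_KERNEL);

                if (!be)
                        return NULL;

                /* Caching is decided up front; ttm_cached is a placeholder
                 * choice here. */
                if (ttm_dma_tt_init(&be->ttm, bo, page_flags, ttm_cached)) {
                        kfree(be);
                        return NULL;
                }

                /* Callers now reach be->ttm.sg and be->ttm.dma_address
                 * without the old ->ttm.ttm indirection, and teardown pairs
                 * with ttm_tt_fini() instead of ttm_dma_tt_fini(). */
                return &be->ttm;
        }
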
index 4273417534410cea259057dda4a8500d9d024280..2bf36229dd571f647da3ebf9419f871d71413b22 100644 (file)
@@ -22,6 +22,9 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
+
+#include <linux/swiotlb.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
@@ -108,7 +111,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
                return ret;
 
        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-                          reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+                          (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(reg);
                return ret;
@@ -134,17 +137,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
        if (ret)
                return ret;
 
-       nouveau_bo_del_io_reserve_lru(bo);
+       ret = nouveau_ttm_fault_reserve_notify(bo);
+       if (ret)
+               goto error_unlock;
 
+       nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+       nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;
 
-       nouveau_bo_add_io_reserve_lru(bo);
-
+error_unlock:
        dma_resv_unlock(bo->base.resv);
-
        return ret;
 }
 
@@ -220,7 +225,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
 
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ttm_resource_manager_set_used(man, false);
-               ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+               ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
                kfree(man);
@@ -265,7 +270,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
        else {
                ttm_resource_manager_set_used(man, false);
-               ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+               ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
                kfree(man);
@@ -279,6 +284,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
+       bool need_swiotlb = false;
        int typei, ret;
 
        ret = nouveau_ttm_init_host(drm, 0);
@@ -313,11 +319,14 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                drm->agp.cma = pci->agp.cma;
        }
 
-       ret = ttm_bo_device_init(&drm->ttm.bdev,
-                                 &nouveau_bo_driver,
-                                 dev->anon_inode->i_mapping,
-                                 dev->vma_offset_manager,
-                                 drm->client.mmu.dmabits <= 32 ? true : false);
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
+       need_swiotlb = !!swiotlb_nr_tbl();
+#endif
+
+       ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
+                                drm->dev->dev, dev->anon_inode->i_mapping,
+                                dev->vma_offset_manager, need_swiotlb,
+                                drm->client.mmu.dmabits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
index 328a4a74f534e0685babecfac49a3ee312d5fa12..fef3b0032fd834c4e88642f899ddf92b4e4742a5 100644 (file)
@@ -436,7 +436,7 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
 }
 
 static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -462,7 +462,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
index 53d5e184ee77cae0a8a57d4c20302be80e9805d0..2e598b8b72afbe4490553fb8a3f33acc895429ba 100644 (file)
@@ -521,12 +521,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-static const struct vm_operations_struct omap_gem_vm_ops = {
-       .fault = omap_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations omapdriver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@ -549,10 +543,7 @@ static struct drm_driver omap_drm_driver = {
 #endif
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = omap_gem_prime_export,
        .gem_prime_import = omap_gem_prime_import,
-       .gem_free_object_unlocked = omap_gem_free_object,
-       .gem_vm_ops = &omap_gem_vm_ops,
        .dumb_create = omap_gem_dumb_create,
        .dumb_map_offset = omap_gem_dumb_map_offset,
        .ioctls = ioctls,
index f67f223c6479fd46d4465ac7504e0c750bb25b97..d8e09792793ab6056d7a95fa4a4068ae038370d3 100644 (file)
@@ -487,7 +487,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
  * vma->vm_private_data points to the GEM object that is backing this
  * mapping.
  */
-vm_fault_t omap_gem_fault(struct vm_fault *vmf)
+static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@ -1089,7 +1089,7 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
  * Constructor & Destructor
  */
 
-void omap_gem_free_object(struct drm_gem_object *obj)
+static void omap_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct omap_drm_private *priv = dev->dev_private;
@@ -1169,6 +1169,18 @@ static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
        return true;
 }
 
+static const struct vm_operations_struct omap_gem_vm_ops = {
+       .fault = omap_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs omap_gem_object_funcs = {
+       .free = omap_gem_free_object,
+       .export = omap_gem_prime_export,
+       .vm_ops = &omap_gem_vm_ops,
+};
+
 /* GEM buffer object constructor */
 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                union omap_gem_size gsize, u32 flags)
@@ -1236,6 +1248,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                size = PAGE_ALIGN(gsize.bytes);
        }
 
+       obj->funcs = &omap_gem_object_funcs;
+
        /* Initialize the GEM object. */
        if (!(flags & OMAP_BO_MEM_SHMEM)) {
                drm_gem_private_object_init(dev, obj, size);
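
This omapdrm hunk is the same per-object callback migration performed in the nouveau and pl111 sections of this merge: free/export/vm_ops move out of struct drm_driver into a per-object drm_gem_object_funcs table that every constructor path must assign. A minimal sketch of the pattern, using hypothetical my_* names around the drm_* helpers shown in the diff:

        struct my_bo {
                struct drm_gem_object base;
        };

        static void my_gem_free_object(struct drm_gem_object *obj)
        {
                struct my_bo *bo = container_of(obj, struct my_bo, base);

                drm_gem_object_release(obj);
                kfree(bo);
        }

        /* Replaces the old drm_driver.gem_free_object_unlocked /
         * .gem_vm_ops / .gem_prime_export hooks for this driver's objects. */
        static const struct drm_gem_object_funcs my_gem_object_funcs = {
                .free = my_gem_free_object,
        };

        struct drm_gem_object *my_gem_new(struct drm_device *dev, size_t size)
        {
                struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

                if (!bo)
                        return NULL;

                /* Must be set on every creation path, including prime
                 * import, since the core no longer falls back to the
                 * drm_driver hooks. */
                bo->base.funcs = &my_gem_object_funcs;
                drm_gem_private_object_init(dev, &bo->base, size);
                return &bo->base;
        }
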
index 729b7812a8155b5db3f7c4153566f500e2a2fa30..eda9b4839c305aa19c46993e885e353cdf598973 100644 (file)
@@ -48,7 +48,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
                struct sg_table *sgt);
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                union omap_gem_size gsize, u32 flags, u32 *handle);
-void omap_gem_free_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 
 /* Dumb Buffers Interface */
@@ -69,7 +68,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags);
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer);
 
-vm_fault_t omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
index b9dbedf8f15e8a79505af4b1ff985f55586c0c1b..e386524b2d77f9d11ab0acba5b7d3d23c53ee097 100644 (file)
@@ -208,6 +208,16 @@ config DRM_PANEL_NOVATEK_NT35510
          around the Novatek NT35510 display controller, such as some
          Hydis panels.
 
+config DRM_PANEL_NOVATEK_NT36672A
+       tristate "Novatek NT36672A DSI panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the panels built
+         around the Novatek NT36672A display controller, such as the
+         Tianma panels used in some variants of the Xiaomi Poco F1 phone.
+
 config DRM_PANEL_NOVATEK_NT39016
        tristate "Novatek NT39016 RGB/SPI panel"
        depends on OF && SPI
@@ -450,6 +460,17 @@ config DRM_PANEL_SONY_ACX565AKM
          Say Y here if you want to enable support for the Sony ACX565AKM
          800x600 3.5" panel (found on the Nokia N900).
 
+config DRM_PANEL_TDO_TL070WSH30
+       tristate "TDO TL070WSH30 DSI panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the TDO TL070WSH30
+         TFT-LCD panel module. The panel has a 1024×600 resolution and
+         uses 24-bit RGB per pixel. It provides a MIPI DSI interface to
+         the host and has a built-in LED backlight and touch controller.
+
 config DRM_PANEL_TPO_TD028TTEC1
        tristate "Toppoly (TPO) TD028TTEC1 panel driver"
        depends on OF && SPI
index 2ba560bca61d7eae6f00c9a040bd9e3588b830ae..d1f8cc572f373b766bff1c8f2b1c38be5b189c8c 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
 obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
 obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
 obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
 obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
new file mode 100644 (file)
index 0000000..533cd39
--- /dev/null
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * This driver is for the DSI interface to panels using the NT36672A display driver IC
+ * from Novatek.
+ * Currently supported are the Tianma FHD+ panels found in some Xiaomi phones, including
+ * some variants of the Poco F1 phone.
+ *
+ * Panels using the Novatek NT36672A IC should add the appropriate per-panel
+ * configuration and use this driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct nt36672a_panel_cmd {
+       const char data[2];
+};
+
+static const char * const nt36672a_regulator_names[] = {
+       "vddio",
+       "vddpos",
+       "vddneg",
+};
+
+static const unsigned long nt36672a_regulator_enable_loads[] = {
+       62000,
+       100000,
+       100000
+};
+
+struct nt36672a_panel_desc {
+       const struct drm_display_mode *display_mode;
+       const char *panel_name;
+
+       unsigned int width_mm;
+       unsigned int height_mm;
+
+       unsigned long mode_flags;
+       enum mipi_dsi_pixel_format format;
+       unsigned int lanes;
+
+       unsigned int num_on_cmds_1;
+       const struct nt36672a_panel_cmd *on_cmds_1;
+       unsigned int num_on_cmds_2;
+       const struct nt36672a_panel_cmd *on_cmds_2;
+
+       unsigned int num_off_cmds;
+       const struct nt36672a_panel_cmd *off_cmds;
+};
+
+struct nt36672a_panel {
+       struct drm_panel base;
+       struct mipi_dsi_device *link;
+       const struct nt36672a_panel_desc *desc;
+
+       struct regulator_bulk_data supplies[ARRAY_SIZE(nt36672a_regulator_names)];
+
+       struct gpio_desc *reset_gpio;
+
+       bool prepared;
+};
+
+static inline struct nt36672a_panel *to_nt36672a_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct nt36672a_panel, base);
+}
+
+static int nt36672a_send_cmds(struct drm_panel *panel, const struct nt36672a_panel_cmd *cmds,
+                             int num)
+{
+       struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+       unsigned int i;
+       int err;
+
+       for (i = 0; i < num; i++) {
+               const struct nt36672a_panel_cmd *cmd = &cmds[i];
+
+               err = mipi_dsi_dcs_write(pinfo->link, cmd->data[0], cmd->data + 1, 1);
+
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int nt36672a_panel_power_off(struct drm_panel *panel)
+{
+       struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+       int ret = 0;
+
+       gpiod_set_value(pinfo->reset_gpio, 1);
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+       if (ret)
+               dev_err(panel->dev, "regulator_bulk_disable failed %d\n", ret);
+
+       return ret;
+}
+
+static int nt36672a_panel_unprepare(struct drm_panel *panel)
+{
+       struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+       int ret;
+
+       if (!pinfo->prepared)
+               return 0;
+
+       /* send off cmds */
+       ret = nt36672a_send_cmds(panel, pinfo->desc->off_cmds,
+                                pinfo->desc->num_off_cmds);
+
+       if (ret < 0)
+               dev_err(panel->dev, "failed to send DCS off cmds: %d\n", ret);
+
+       ret = mipi_dsi_dcs_set_display_off(pinfo->link);
+       if (ret < 0)
+               dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);
+
+       /* 120ms delay required here as per DCS spec */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+       if (ret < 0)
+               dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);
+
+       /* 0x3C = 60ms delay */
+       msleep(60);
+
+       ret = nt36672a_panel_power_off(panel);
+       if (ret < 0)
+               dev_err(panel->dev, "power_off failed ret = %d\n", ret);
+
+       pinfo->prepared = false;
+
+       return ret;
+}
+
+static int nt36672a_panel_power_on(struct nt36672a_panel *pinfo)
+{
+       int ret;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * As per the downstream kernel, the reset sequence of the Tianma FHD panel
+        * requires the panel to be out of reset for 10ms, followed by being held in
+        * reset for 10ms. But for Android AOSP we needed to bump both delays up to
+        * 200ms, otherwise we sometimes get a white screen.
+        * FIXME: Try to reduce this 200ms delay to a lesser value.
+        */
+       gpiod_set_value(pinfo->reset_gpio, 1);
+       msleep(200);
+       gpiod_set_value(pinfo->reset_gpio, 0);
+       msleep(200);
+
+       return 0;
+}
+
+static int nt36672a_panel_prepare(struct drm_panel *panel)
+{
+       struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+       int err;
+
+       if (pinfo->prepared)
+               return 0;
+
+       err = nt36672a_panel_power_on(pinfo);
+       if (err < 0)
+               goto poweroff;
+
+       /* send first part of init cmds */
+       err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_1,
+                                pinfo->desc->num_on_cmds_1);
+
+       if (err < 0) {
+               dev_err(panel->dev, "failed to send DCS Init 1st Code: %d\n", err);
+               goto poweroff;
+       }
+
+       err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+               goto poweroff;
+       }
+
+       /* 0x46 = 70 ms delay */
+       msleep(70);
+
+       err = mipi_dsi_dcs_set_display_on(pinfo->link);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to Set Display ON: %d\n", err);
+               goto poweroff;
+       }
+
+       /* Send rest of the init cmds */
+       err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_2,
+                                pinfo->desc->num_on_cmds_2);
+
+       if (err < 0) {
+               dev_err(panel->dev, "failed to send DCS Init 2nd Code: %d\n", err);
+               goto poweroff;
+       }
+
+       msleep(120);
+
+       pinfo->prepared = true;
+
+       return 0;
+
+poweroff:
+       gpiod_set_value(pinfo->reset_gpio, 0);
+       return err;
+}
+
+static int nt36672a_panel_get_modes(struct drm_panel *panel,
+                                   struct drm_connector *connector)
+{
+       struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+       const struct drm_display_mode *m = pinfo->desc->display_mode;
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, m);
+       if (!mode) {
+               dev_err(panel->dev, "failed to add mode %ux%u@%u\n", m->hdisplay,
+                       m->vdisplay, drm_mode_vrefresh(m));
+               return -ENOMEM;
+       }
+
+       connector->display_info.width_mm = pinfo->desc->width_mm;
+       connector->display_info.height_mm = pinfo->desc->height_mm;
+
+       drm_mode_set_name(mode);
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+       .unprepare = nt36672a_panel_unprepare,
+       .prepare = nt36672a_panel_prepare,
+       .get_modes = nt36672a_panel_get_modes,
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_1[] = {
+       /* skin enhancement mode */
+       { .data = {0xFF, 0x22} },
+       { .data = {0x00, 0x40} },
+       { .data = {0x01, 0xC0} },
+       { .data = {0x02, 0x40} },
+       { .data = {0x03, 0x40} },
+       { .data = {0x04, 0x40} },
+       { .data = {0x05, 0x40} },
+       { .data = {0x06, 0x40} },
+       { .data = {0x07, 0x40} },
+       { .data = {0x08, 0x40} },
+       { .data = {0x09, 0x40} },
+       { .data = {0x0A, 0x40} },
+       { .data = {0x0B, 0x40} },
+       { .data = {0x0C, 0x40} },
+       { .data = {0x0D, 0x40} },
+       { .data = {0x0E, 0x40} },
+       { .data = {0x0F, 0x40} },
+       { .data = {0x10, 0x40} },
+       { .data = {0x11, 0x50} },
+       { .data = {0x12, 0x60} },
+       { .data = {0x13, 0x70} },
+       { .data = {0x14, 0x58} },
+       { .data = {0x15, 0x68} },
+       { .data = {0x16, 0x78} },
+       { .data = {0x17, 0x77} },
+       { .data = {0x18, 0x39} },
+       { .data = {0x19, 0x2D} },
+       { .data = {0x1A, 0x2E} },
+       { .data = {0x1B, 0x32} },
+       { .data = {0x1C, 0x37} },
+       { .data = {0x1D, 0x3A} },
+       { .data = {0x1E, 0x40} },
+       { .data = {0x1F, 0x40} },
+       { .data = {0x20, 0x40} },
+       { .data = {0x21, 0x40} },
+       { .data = {0x22, 0x40} },
+       { .data = {0x23, 0x40} },
+       { .data = {0x24, 0x40} },
+       { .data = {0x25, 0x40} },
+       { .data = {0x26, 0x40} },
+       { .data = {0x27, 0x40} },
+       { .data = {0x28, 0x40} },
+       { .data = {0x2D, 0x00} },
+       { .data = {0x2F, 0x40} },
+       { .data = {0x30, 0x40} },
+       { .data = {0x31, 0x40} },
+       { .data = {0x32, 0x40} },
+       { .data = {0x33, 0x40} },
+       { .data = {0x34, 0x40} },
+       { .data = {0x35, 0x40} },
+       { .data = {0x36, 0x40} },
+       { .data = {0x37, 0x40} },
+       { .data = {0x38, 0x40} },
+       { .data = {0x39, 0x40} },
+       { .data = {0x3A, 0x40} },
+       { .data = {0x3B, 0x40} },
+       { .data = {0x3D, 0x40} },
+       { .data = {0x3F, 0x40} },
+       { .data = {0x40, 0x40} },
+       { .data = {0x41, 0x40} },
+       { .data = {0x42, 0x40} },
+       { .data = {0x43, 0x40} },
+       { .data = {0x44, 0x40} },
+       { .data = {0x45, 0x40} },
+       { .data = {0x46, 0x40} },
+       { .data = {0x47, 0x40} },
+       { .data = {0x48, 0x40} },
+       { .data = {0x49, 0x40} },
+       { .data = {0x4A, 0x40} },
+       { .data = {0x4B, 0x40} },
+       { .data = {0x4C, 0x40} },
+       { .data = {0x4D, 0x40} },
+       { .data = {0x4E, 0x40} },
+       { .data = {0x4F, 0x40} },
+       { .data = {0x50, 0x40} },
+       { .data = {0x51, 0x40} },
+       { .data = {0x52, 0x40} },
+       { .data = {0x53, 0x01} },
+       { .data = {0x54, 0x01} },
+       { .data = {0x55, 0xFE} },
+       { .data = {0x56, 0x77} },
+       { .data = {0x58, 0xCD} },
+       { .data = {0x59, 0xD0} },
+       { .data = {0x5A, 0xD0} },
+       { .data = {0x5B, 0x50} },
+       { .data = {0x5C, 0x50} },
+       { .data = {0x5D, 0x50} },
+       { .data = {0x5E, 0x50} },
+       { .data = {0x5F, 0x50} },
+       { .data = {0x60, 0x50} },
+       { .data = {0x61, 0x50} },
+       { .data = {0x62, 0x50} },
+       { .data = {0x63, 0x50} },
+       { .data = {0x64, 0x50} },
+       { .data = {0x65, 0x50} },
+       { .data = {0x66, 0x50} },
+       { .data = {0x67, 0x50} },
+       { .data = {0x68, 0x50} },
+       { .data = {0x69, 0x50} },
+       { .data = {0x6A, 0x50} },
+       { .data = {0x6B, 0x50} },
+       { .data = {0x6C, 0x50} },
+       { .data = {0x6D, 0x50} },
+       { .data = {0x6E, 0x50} },
+       { .data = {0x6F, 0x50} },
+       { .data = {0x70, 0x07} },
+       { .data = {0x71, 0x00} },
+       { .data = {0x72, 0x00} },
+       { .data = {0x73, 0x00} },
+       { .data = {0x74, 0x06} },
+       { .data = {0x75, 0x0C} },
+       { .data = {0x76, 0x03} },
+       { .data = {0x77, 0x09} },
+       { .data = {0x78, 0x0F} },
+       { .data = {0x79, 0x68} },
+       { .data = {0x7A, 0x88} },
+       { .data = {0x7C, 0x80} },
+       { .data = {0x7D, 0x80} },
+       { .data = {0x7E, 0x80} },
+       { .data = {0x7F, 0x00} },
+       { .data = {0x80, 0x00} },
+       { .data = {0x81, 0x00} },
+       { .data = {0x83, 0x01} },
+       { .data = {0x84, 0x00} },
+       { .data = {0x85, 0x80} },
+       { .data = {0x86, 0x80} },
+       { .data = {0x87, 0x80} },
+       { .data = {0x88, 0x40} },
+       { .data = {0x89, 0x91} },
+       { .data = {0x8A, 0x98} },
+       { .data = {0x8B, 0x80} },
+       { .data = {0x8C, 0x80} },
+       { .data = {0x8D, 0x80} },
+       { .data = {0x8E, 0x80} },
+       { .data = {0x8F, 0x80} },
+       { .data = {0x90, 0x80} },
+       { .data = {0x91, 0x80} },
+       { .data = {0x92, 0x80} },
+       { .data = {0x93, 0x80} },
+       { .data = {0x94, 0x80} },
+       { .data = {0x95, 0x80} },
+       { .data = {0x96, 0x80} },
+       { .data = {0x97, 0x80} },
+       { .data = {0x98, 0x80} },
+       { .data = {0x99, 0x80} },
+       { .data = {0x9A, 0x80} },
+       { .data = {0x9B, 0x80} },
+       { .data = {0x9C, 0x80} },
+       { .data = {0x9D, 0x80} },
+       { .data = {0x9E, 0x80} },
+       { .data = {0x9F, 0x80} },
+       { .data = {0xA0, 0x8A} },
+       { .data = {0xA2, 0x80} },
+       { .data = {0xA6, 0x80} },
+       { .data = {0xA7, 0x80} },
+       { .data = {0xA9, 0x80} },
+       { .data = {0xAA, 0x80} },
+       { .data = {0xAB, 0x80} },
+       { .data = {0xAC, 0x80} },
+       { .data = {0xAD, 0x80} },
+       { .data = {0xAE, 0x80} },
+       { .data = {0xAF, 0x80} },
+       { .data = {0xB7, 0x76} },
+       { .data = {0xB8, 0x76} },
+       { .data = {0xB9, 0x05} },
+       { .data = {0xBA, 0x0D} },
+       { .data = {0xBB, 0x14} },
+       { .data = {0xBC, 0x0F} },
+       { .data = {0xBD, 0x18} },
+       { .data = {0xBE, 0x1F} },
+       { .data = {0xBF, 0x05} },
+       { .data = {0xC0, 0x0D} },
+       { .data = {0xC1, 0x14} },
+       { .data = {0xC2, 0x03} },
+       { .data = {0xC3, 0x07} },
+       { .data = {0xC4, 0x0A} },
+       { .data = {0xC5, 0xA0} },
+       { .data = {0xC6, 0x55} },
+       { .data = {0xC7, 0xFF} },
+       { .data = {0xC8, 0x39} },
+       { .data = {0xC9, 0x44} },
+       { .data = {0xCA, 0x12} },
+       { .data = {0xCD, 0x80} },
+       { .data = {0xDB, 0x80} },
+       { .data = {0xDC, 0x80} },
+       { .data = {0xDD, 0x80} },
+       { .data = {0xE0, 0x80} },
+       { .data = {0xE1, 0x80} },
+       { .data = {0xE2, 0x80} },
+       { .data = {0xE3, 0x80} },
+       { .data = {0xE4, 0x80} },
+       { .data = {0xE5, 0x40} },
+       { .data = {0xE6, 0x40} },
+       { .data = {0xE7, 0x40} },
+       { .data = {0xE8, 0x40} },
+       { .data = {0xE9, 0x40} },
+       { .data = {0xEA, 0x40} },
+       { .data = {0xEB, 0x40} },
+       { .data = {0xEC, 0x40} },
+       { .data = {0xED, 0x40} },
+       { .data = {0xEE, 0x40} },
+       { .data = {0xEF, 0x40} },
+       { .data = {0xF0, 0x40} },
+       { .data = {0xF1, 0x40} },
+       { .data = {0xF2, 0x40} },
+       { .data = {0xF3, 0x40} },
+       { .data = {0xF4, 0x40} },
+       { .data = {0xF5, 0x40} },
+       { .data = {0xF6, 0x40} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0xFF, 0x23} },
+       { .data = {0xFB, 0x01} },
+       /* dimming enable */
+       { .data = {0x01, 0x84} },
+       { .data = {0x05, 0x2D} },
+       { .data = {0x06, 0x00} },
+       /* resolution 1080*2246 */
+       { .data = {0x11, 0x01} },
+       { .data = {0x12, 0x7B} },
+       { .data = {0x15, 0x6F} },
+       { .data = {0x16, 0x0B} },
+       /* UI mode */
+       { .data = {0x29, 0x0A} },
+       { .data = {0x30, 0xFF} },
+       { .data = {0x31, 0xFF} },
+       { .data = {0x32, 0xFF} },
+       { .data = {0x33, 0xFF} },
+       { .data = {0x34, 0xFF} },
+       { .data = {0x35, 0xFF} },
+       { .data = {0x36, 0xFF} },
+       { .data = {0x37, 0xFF} },
+       { .data = {0x38, 0xFC} },
+       { .data = {0x39, 0xF8} },
+       { .data = {0x3A, 0xF4} },
+       { .data = {0x3B, 0xF1} },
+       { .data = {0x3D, 0xEE} },
+       { .data = {0x3F, 0xEB} },
+       { .data = {0x40, 0xE8} },
+       { .data = {0x41, 0xE5} },
+       /* STILL mode */
+       { .data = {0x2A, 0x13} },
+       { .data = {0x45, 0xFF} },
+       { .data = {0x46, 0xFF} },
+       { .data = {0x47, 0xFF} },
+       { .data = {0x48, 0xFF} },
+       { .data = {0x49, 0xFF} },
+       { .data = {0x4A, 0xFF} },
+       { .data = {0x4B, 0xFF} },
+       { .data = {0x4C, 0xFF} },
+       { .data = {0x4D, 0xED} },
+       { .data = {0x4E, 0xD5} },
+       { .data = {0x4F, 0xBF} },
+       { .data = {0x50, 0xA6} },
+       { .data = {0x51, 0x96} },
+       { .data = {0x52, 0x86} },
+       { .data = {0x53, 0x76} },
+       { .data = {0x54, 0x66} },
+       /* MOVING mode */
+       { .data = {0x2B, 0x0E} },
+       { .data = {0x58, 0xFF} },
+       { .data = {0x59, 0xFF} },
+       { .data = {0x5A, 0xFF} },
+       { .data = {0x5B, 0xFF} },
+       { .data = {0x5C, 0xFF} },
+       { .data = {0x5D, 0xFF} },
+       { .data = {0x5E, 0xFF} },
+       { .data = {0x5F, 0xFF} },
+       { .data = {0x60, 0xF6} },
+       { .data = {0x61, 0xEA} },
+       { .data = {0x62, 0xE1} },
+       { .data = {0x63, 0xD8} },
+       { .data = {0x64, 0xCE} },
+       { .data = {0x65, 0xC3} },
+       { .data = {0x66, 0xBA} },
+       { .data = {0x67, 0xB3} },
+       { .data = {0xFF, 0x25} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0x05, 0x04} },
+       { .data = {0xFF, 0x26} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0x1C, 0xAF} },
+       { .data = {0xFF, 0x10} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0x51, 0xFF} },
+       { .data = {0x53, 0x24} },
+       { .data = {0x55, 0x00} },
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_2[] = {
+       { .data = {0xFF, 0x24} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0xC3, 0x01} },
+       { .data = {0xC4, 0x54} },
+       { .data = {0xFF, 0x10} },
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_off_cmds[] = {
+       { .data = {0xFF, 0x24} },
+       { .data = {0xFB, 0x01} },
+       { .data = {0xC3, 0x01} },
+       { .data = {0xFF, 0x10} },
+};
+
+static const struct drm_display_mode tianma_fhd_video_panel_default_mode = {
+       .clock          = 161331,
+
+       .hdisplay       = 1080,
+       .hsync_start    = 1080 + 40,
+       .hsync_end      = 1080 + 40 + 20,
+       .htotal         = 1080 + 40 + 20 + 44,
+
+       .vdisplay       = 2246,
+       .vsync_start    = 2246 + 15,
+       .vsync_end      = 2246 + 15 + 2,
+       .vtotal         = 2246 + 15 + 2 + 8,
+
+       .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct nt36672a_panel_desc tianma_fhd_video_panel_desc = {
+       .display_mode = &tianma_fhd_video_panel_default_mode,
+
+       .width_mm = 68,
+       .height_mm = 136,
+
+       .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+                       | MIPI_DSI_MODE_VIDEO_HSE
+                       | MIPI_DSI_CLOCK_NON_CONTINUOUS
+                       | MIPI_DSI_MODE_VIDEO_BURST,
+       .format = MIPI_DSI_FMT_RGB888,
+       .lanes = 4,
+       .on_cmds_1 = tianma_fhd_video_on_cmds_1,
+       .num_on_cmds_1 = ARRAY_SIZE(tianma_fhd_video_on_cmds_1),
+       .on_cmds_2 = tianma_fhd_video_on_cmds_2,
+       .num_on_cmds_2 = ARRAY_SIZE(tianma_fhd_video_on_cmds_2),
+       .off_cmds = tianma_fhd_video_off_cmds,
+       .num_off_cmds = ARRAY_SIZE(tianma_fhd_video_off_cmds),
+};
+
+static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
+{
+       struct device *dev = &pinfo->link->dev;
+       int i, ret;
+
+       for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
+               pinfo->supplies[i].supply = nt36672a_regulator_names[i];
+
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
+                                     pinfo->supplies);
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+       for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
+               ret = regulator_set_load(pinfo->supplies[i].consumer,
+                                        nt36672a_regulator_enable_loads[i]);
+               if (ret)
+                       return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
+       }
+
+       pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(pinfo->reset_gpio))
+               return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
+                                    "failed to get reset gpio from DT\n");
+
+       drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+       drm_panel_add(&pinfo->base);
+
+       return 0;
+}
+
+static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct nt36672a_panel *pinfo;
+       const struct nt36672a_panel_desc *desc;
+       int err;
+
+       pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+       if (!pinfo)
+               return -ENOMEM;
+
+       desc = of_device_get_match_data(&dsi->dev);
+       dsi->mode_flags = desc->mode_flags;
+       dsi->format = desc->format;
+       dsi->lanes = desc->lanes;
+       pinfo->desc = desc;
+       pinfo->link = dsi;
+
+       mipi_dsi_set_drvdata(dsi, pinfo);
+
+       err = nt36672a_panel_add(pinfo);
+       if (err < 0)
+               return err;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+       int err;
+
+       err = drm_panel_unprepare(&pinfo->base);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
+
+       err = drm_panel_disable(&pinfo->base);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+       drm_panel_remove(&pinfo->base);
+
+       return 0;
+}
+
+static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+
+       drm_panel_disable(&pinfo->base);
+       drm_panel_unprepare(&pinfo->base);
+}
+
+static const struct of_device_id tianma_fhd_video_of_match[] = {
+       { .compatible = "tianma,fhd-video", .data = &tianma_fhd_video_panel_desc },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tianma_fhd_video_of_match);
+
+static struct mipi_dsi_driver nt36672a_panel_driver = {
+       .driver = {
+               .name = "panel-tianma-nt36672a",
+               .of_match_table = tianma_fhd_video_of_match,
+       },
+       .probe = nt36672a_panel_probe,
+       .remove = nt36672a_panel_remove,
+       .shutdown = nt36672a_panel_shutdown,
+};
+module_mipi_dsi_driver(nt36672a_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_DESCRIPTION("NOVATEK NT36672A based MIPI-DSI LCD panel driver");
+MODULE_LICENSE("GPL");
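
A quick sanity check on the tianma_fhd_video mode defined above, using only the numbers in the struct: htotal = 1080 + 40 + 20 + 44 = 1184 and vtotal = 2246 + 15 + 2 + 8 = 2271, so the 161331 kHz pixel clock gives 161331000 / (1184 * 2271) ≈ 60.0 Hz — the expected 60 Hz refresh for this FHD+ panel.
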
index b6e377aa1131b7093a40ae915d774c6bceb387c6..f80b44a8a7003e167062171667f5f57f8e0cce55 100644 (file)
@@ -99,20 +99,6 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
                dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n");
 }
 
-static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
-                                     size_t len)
-{
-       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
-       /* data will be sent in dsi hs mode (ie. no lpm) */
-       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
-
-       otm8009a_dcs_write_buf(ctx, data, len);
-
-       /* restore back the dsi lpm mode */
-       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-}
-
 #define dcs_write_seq(ctx, seq...)                     \
 ({                                                     \
        static const u8 d[] = { seq };                  \
@@ -400,7 +386,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
                 */
                data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
                data[1] = bd->props.brightness;
-               otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+               otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
 
                /* set Brightness Control & Backlight on */
                data[1] = 0x24;
@@ -412,7 +398,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
 
        /* Update Brightness Control & Backlight */
        data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
-       otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+       otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
 
        return 0;
 }
@@ -452,7 +438,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
        dsi->lanes = 2;
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
-                         MIPI_DSI_MODE_LPM;
+                         MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
 
        drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
                       DRM_MODE_CONNECTOR_DSI);
index f908eeafb1afd367afd4a7b2a0a0da70ed45a46d..412c0dbcb2b6f25cb93e5d98590430ab73e2cf49 100644 (file)
@@ -82,15 +82,15 @@ struct rm68200 {
 };
 
 static const struct drm_display_mode default_mode = {
-       .clock = 52582,
+       .clock = 54000,
        .hdisplay = 720,
-       .hsync_start = 720 + 38,
-       .hsync_end = 720 + 38 + 8,
-       .htotal = 720 + 38 + 8 + 38,
+       .hsync_start = 720 + 48,
+       .hsync_end = 720 + 48 + 9,
+       .htotal = 720 + 48 + 9 + 48,
        .vdisplay = 1280,
        .vsync_start = 1280 + 12,
-       .vsync_end = 1280 + 12 + 4,
-       .vtotal = 1280 + 12 + 4 + 12,
+       .vsync_end = 1280 + 12 + 5,
+       .vtotal = 1280 + 12 + 5 + 12,
        .flags = 0,
        .width_mm = 68,
        .height_mm = 122,
@@ -391,7 +391,7 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
        dsi->lanes = 2;
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
-                         MIPI_DSI_MODE_LPM;
+                         MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
 
        drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
                       DRM_MODE_CONNECTOR_DSI);
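
The retimed rm68200 mode keeps the refresh rate unchanged: the old timings gave 52582000 / ((720 + 38 + 8 + 38) * (1280 + 12 + 4 + 12)) = 52582000 / (804 * 1308) ≈ 50.0 Hz, and the new ones give 54000000 / (825 * 1309) ≈ 50.0 Hz, with htotal = 720 + 48 + 9 + 48 = 825 and vtotal = 1280 + 12 + 5 + 12 = 1309.
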
index 535c8d1cca21c29df0327d426e3fd7a09bf4dc79..a3782830ae3c4ba0882dce04c3d4cc491f8add92 100644 (file)
@@ -75,13 +75,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel)
 static int rb070d30_panel_enable(struct drm_panel *panel)
 {
        struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
-       int ret;
 
-       ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
-       if (ret)
-               return ret;
-
-       return 0;
+       return mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
 }
 
 static int rb070d30_panel_disable(struct drm_panel *panel)
index 1d1c79a186139f00c3422642150e32f9e4d94c68..0ab1b7ec84cdaf7cb68e6de4c6dddedbc39f5479 100644 (file)
@@ -214,7 +214,7 @@ static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
          0x00, 0x00 }
 };
 
-unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
+static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
        0x18, 0x19, 0x1a, 0x1b, 0x1c,
        0x1d, 0x1e, 0x1f, 0x20, 0x21
 };
index 2be358fb46f7d09657779ce172e08d29cba2bef6..8b82ec33f08aedaf72c60c053906f72cd7ec7ec4 100644 (file)
@@ -3873,6 +3873,32 @@ static const struct panel_desc winstar_wf35ltiacd = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode = {
+       .clock = 51200,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 100,
+       .hsync_end = 1024 + 100 + 100,
+       .htotal = 1024 + 100 + 100 + 120,
+       .vdisplay = 600,
+       .vsync_start = 600 + 10,
+       .vsync_end = 600 + 10 + 10,
+       .vtotal = 600 + 10 + 10 + 15,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
+       .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 154,
+               .height = 90,
+       },
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode arm_rtsm_mode[] = {
        {
                .clock = 65000,
@@ -4299,6 +4325,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "winstar,wf35ltiacd",
                .data = &winstar_wf35ltiacd,
+       }, {
+               .compatible = "yes-optoelectronics,ytc700tlag-05-201c",
+               .data = &yes_optoelectronics_ytc700tlag_05_201c,
        }, {
                /* Must be the last entry */
                .compatible = "panel-dpi",
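
For the ytc700tlag-05-201c mode added above, htotal = 1024 + 100 + 100 + 120 = 1344 and vtotal = 600 + 10 + 10 + 15 = 635, so the 51200 kHz clock works out to 51200000 / (1344 * 635) ≈ 60.0 Hz.
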
index c22e7c49e077863648124cfdf9cf94a3e8ffd3e4..b30510b1696a04d078b5092966e163dce9312517 100644 (file)
@@ -153,7 +153,7 @@ static const struct drm_display_mode jh057n00900_mode = {
        .height_mm   = 130,
 };
 
-struct st7703_panel_desc jh057n00900_panel_desc = {
+static const struct st7703_panel_desc jh057n00900_panel_desc = {
        .mode = &jh057n00900_mode,
        .lanes = 4,
        .mode_flags = MIPI_DSI_MODE_VIDEO |
diff --git a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
new file mode 100644 (file)
index 0000000..820731b
--- /dev/null
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tdo_tl070wsh30_panel {
+       struct drm_panel base;
+       struct mipi_dsi_device *link;
+
+       struct regulator *supply;
+       struct gpio_desc *reset_gpio;
+
+       bool prepared;
+};
+
+static inline
+struct tdo_tl070wsh30_panel *to_tdo_tl070wsh30_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct tdo_tl070wsh30_panel, base);
+}
+
+static int tdo_tl070wsh30_panel_prepare(struct drm_panel *panel)
+{
+       struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+       int err;
+
+       if (tdo_tl070wsh30->prepared)
+               return 0;
+
+       err = regulator_enable(tdo_tl070wsh30->supply);
+       if (err < 0)
+               return err;
+
+       usleep_range(10000, 11000);
+
+       gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 1);
+
+       usleep_range(10000, 11000);
+
+       gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 0);
+
+       msleep(200);
+
+       err = mipi_dsi_dcs_exit_sleep_mode(tdo_tl070wsh30->link);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+               regulator_disable(tdo_tl070wsh30->supply);
+               return err;
+       }
+
+       msleep(200);
+
+       err = mipi_dsi_dcs_set_display_on(tdo_tl070wsh30->link);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to set display on: %d\n", err);
+               regulator_disable(tdo_tl070wsh30->supply);
+               return err;
+       }
+
+       msleep(20);
+
+       tdo_tl070wsh30->prepared = true;
+
+       return 0;
+}
+
+static int tdo_tl070wsh30_panel_unprepare(struct drm_panel *panel)
+{
+       struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+       int err;
+
+       if (!tdo_tl070wsh30->prepared)
+               return 0;
+
+       err = mipi_dsi_dcs_set_display_off(tdo_tl070wsh30->link);
+       if (err < 0)
+               dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+       usleep_range(10000, 11000);
+
+       err = mipi_dsi_dcs_enter_sleep_mode(tdo_tl070wsh30->link);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+               return err;
+       }
+
+       usleep_range(10000, 11000);
+
+       regulator_disable(tdo_tl070wsh30->supply);
+
+       tdo_tl070wsh30->prepared = false;
+
+       return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+       .clock = 47250,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 46,
+       .hsync_end = 1024 + 46 + 80,
+       .htotal = 1024 + 46 + 80 + 100,
+       .vdisplay = 600,
+       .vsync_start = 600 + 5,
+       .vsync_end = 600 + 5 + 5,
+       .vtotal = 600 + 5 + 5 + 20,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static int tdo_tl070wsh30_panel_get_modes(struct drm_panel *panel,
+                                      struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, &default_mode);
+       if (!mode) {
+               dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+                       default_mode.hdisplay, default_mode.vdisplay,
+                       drm_mode_vrefresh(&default_mode));
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       drm_mode_probed_add(connector, mode);
+
+       connector->display_info.width_mm = 154;
+       connector->display_info.height_mm = 85;
+       connector->display_info.bpc = 8;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs tdo_tl070wsh30_panel_funcs = {
+       .unprepare = tdo_tl070wsh30_panel_unprepare,
+       .prepare = tdo_tl070wsh30_panel_prepare,
+       .get_modes = tdo_tl070wsh30_panel_get_modes,
+};
+
+static const struct of_device_id tdo_tl070wsh30_of_match[] = {
+       { .compatible = "tdo,tl070wsh30", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tdo_tl070wsh30_of_match);
+
+static int tdo_tl070wsh30_panel_add(struct tdo_tl070wsh30_panel *tdo_tl070wsh30)
+{
+       struct device *dev = &tdo_tl070wsh30->link->dev;
+       int err;
+
+       tdo_tl070wsh30->supply = devm_regulator_get(dev, "power");
+       if (IS_ERR(tdo_tl070wsh30->supply))
+               return PTR_ERR(tdo_tl070wsh30->supply);
+
+       tdo_tl070wsh30->reset_gpio = devm_gpiod_get(dev, "reset",
+                                                   GPIOD_OUT_LOW);
+       if (IS_ERR(tdo_tl070wsh30->reset_gpio)) {
+               err = PTR_ERR(tdo_tl070wsh30->reset_gpio);
+               dev_dbg(dev, "failed to get reset gpio: %d\n", err);
+               return err;
+       }
+
+       drm_panel_init(&tdo_tl070wsh30->base, &tdo_tl070wsh30->link->dev,
+                      &tdo_tl070wsh30_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+       err = drm_panel_of_backlight(&tdo_tl070wsh30->base);
+       if (err)
+               return err;
+
+       drm_panel_add(&tdo_tl070wsh30->base);
+
+       return 0;
+}
+
+static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct tdo_tl070wsh30_panel *tdo_tl070wsh30;
+       int err;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+
+       tdo_tl070wsh30 = devm_kzalloc(&dsi->dev, sizeof(*tdo_tl070wsh30),
+                                   GFP_KERNEL);
+       if (!tdo_tl070wsh30)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, tdo_tl070wsh30);
+       tdo_tl070wsh30->link = dsi;
+
+       err = tdo_tl070wsh30_panel_add(tdo_tl070wsh30);
+       if (err < 0)
+               return err;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+       int err;
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+       drm_panel_remove(&tdo_tl070wsh30->base);
+       drm_panel_disable(&tdo_tl070wsh30->base);
+       drm_panel_unprepare(&tdo_tl070wsh30->base);
+
+       return 0;
+}
+
+static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+
+       drm_panel_disable(&tdo_tl070wsh30->base);
+       drm_panel_unprepare(&tdo_tl070wsh30->base);
+}
+
+static struct mipi_dsi_driver tdo_tl070wsh30_panel_driver = {
+       .driver = {
+               .name = "panel-tdo-tl070wsh30",
+               .of_match_table = tdo_tl070wsh30_of_match,
+       },
+       .probe = tdo_tl070wsh30_panel_probe,
+       .remove = tdo_tl070wsh30_panel_remove,
+       .shutdown = tdo_tl070wsh30_panel_shutdown,
+};
+module_mipi_dsi_driver(tdo_tl070wsh30_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("TDO TL070WSH30 panel driver");
+MODULE_LICENSE("GPL v2");
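
As a check on the TL070WSH30 mode above: htotal = 1024 + 46 + 80 + 100 = 1250 and vtotal = 600 + 5 + 5 + 20 = 630, so 47250000 / (1250 * 630) = 60 Hz exactly.
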
index 037c14fd6bacf3efdf60f4e8025aed69eb80bf29..ba0c00d1a001c7aa36148a940dde3cc9863fba1c 100644 (file)
@@ -242,13 +242,8 @@ static int td028ttec1_prepare(struct drm_panel *panel)
 static int td028ttec1_enable(struct drm_panel *panel)
 {
        struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
-       int ret;
 
-       ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
-       if (ret)
-               return ret;
-
-       return 0;
+       return jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
 }
 
 static int td028ttec1_disable(struct drm_panel *panel)
index 8ab025d0035f37403e4cbb1b8c4f1c130aefb8ff..913eaa6d0bc6b8974c29e3cdb13ddc98318f445c 100644 (file)
@@ -29,18 +29,13 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
                                   u32 flags)
 {
        struct dev_pm_opp *opp;
-       int err;
 
        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
        dev_pm_opp_put(opp);
 
-       err = dev_pm_opp_set_rate(dev, *freq);
-       if (err)
-               return err;
-
-       return 0;
+       return dev_pm_opp_set_rate(dev, *freq);
 }
 
 static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
index e6896733838ab42b5fe4c28fab12b061ccf96eb5..ea8d31863c50e98a1f6fdd09425c90bcdef39f8d 100644 (file)
 
 static int panfrost_reset_init(struct panfrost_device *pfdev)
 {
-       int err;
-
        pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
        if (IS_ERR(pfdev->rstc)) {
                dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
                return PTR_ERR(pfdev->rstc);
        }
 
-       err = reset_control_deassert(pfdev->rstc);
-       if (err)
-               return err;
-
-       return 0;
+       return reset_control_deassert(pfdev->rstc);
 }
 
 static void panfrost_reset_fini(struct panfrost_device *pfdev)
index 2e9cbd1c4a58eef76dd360dfea6d29fea47ca90e..140e004a37902f9730a22913b5beac366a11537f 100644 (file)
@@ -88,6 +88,7 @@ struct panfrost_device {
        /* pm_domains for devices with more than one. */
        struct device *pm_domain_devs[MAX_PM_DOMAINS];
        struct device_link *pm_domain_links[MAX_PM_DOMAINS];
+       bool coherent;
 
        struct panfrost_features features;
        const struct panfrost_compatible *comp;
index 37d4cb7a5491c4a58ea1a3102ac3f05032e4a9fc..6e5dedacb777c42570d6320d680fcb94b4a0c63d 100644 (file)
@@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev)
        if (!pfdev->comp)
                return -ENODEV;
 
+       pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
+
        /* Allocate and initialize the DRM device. */
        ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
        if (IS_ERR(ddev))
index 1a6cea0e0bd743baf77dfc82571f86134ea743ef..fb9f7334ce18d447fe2cce904a507f2c088ef4df 100644 (file)
@@ -220,6 +220,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
  */
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 {
+       struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_gem_object *obj;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -229,6 +230,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
        INIT_LIST_HEAD(&obj->mappings.list);
        mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;
+       obj->base.map_cached = pfdev->coherent;
 
        return &obj->base.base;
 }
index 30e7b7196dab059c96a32e53a0fe8680b1675c0e..4902bc6624c80e24f42c2b9c314d3bad75ecdd2d 100644 (file)
@@ -25,7 +25,8 @@
 
 struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
-
+       bool stopped;
+       struct mutex lock;
        u64 fence_context;
        u64 emit_seqno;
 };
@@ -369,6 +370,24 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
        job_write(pfdev, JOB_INT_MASK, irq_mask);
 }
 
+static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
+                                   struct drm_sched_job *bad)
+{
+       bool stopped = false;
+
+       mutex_lock(&queue->lock);
+       if (!queue->stopped) {
+               drm_sched_stop(&queue->sched, bad);
+               if (bad)
+                       drm_sched_increase_karma(bad);
+               queue->stopped = true;
+               stopped = true;
+       }
+       mutex_unlock(&queue->lock);
+
+       return stopped;
+}
+
 static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 {
        struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -392,19 +411,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);
 
+       /* Scheduler is already stopped, nothing to do. */
+       if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
+               return;
+
        if (!mutex_trylock(&pfdev->reset_lock))
                return;
 
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
 
-               drm_sched_stop(sched, sched_job);
-               if (js != i)
-                       /* Ensure any timeouts on other slots have finished */
+               /*
+                * If the queue is still active, make sure we wait for any
+                * pending timeouts.
+                */
+               if (!pfdev->js->queue[i].stopped)
                        cancel_delayed_work_sync(&sched->work_tdr);
-       }
 
-       drm_sched_increase_karma(sched_job);
+               /*
+                * If the scheduler was not already stopped, there's a tiny
+                * chance a timeout has expired just before we stopped it, and
+                * drm_sched_stop() does not flush pending works. Let's flush
+                * them now so the timeout handler doesn't get called in the
+                * middle of a reset.
+                */
+               if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL))
+                       cancel_delayed_work_sync(&sched->work_tdr);
+
+               /*
+                * Now that we cancelled the pending timeouts, we can safely
+                * reset the stopped state.
+                */
+               pfdev->js->queue[i].stopped = false;
+       }
 
        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
@@ -421,11 +460,11 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
 
+       mutex_unlock(&pfdev->reset_lock);
+
        /* restart scheduler after GPU is usable again */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);
-
-       mutex_unlock(&pfdev->reset_lock);
 }
 
 static const struct drm_sched_backend_ops panfrost_sched_ops = {
@@ -515,6 +554,8 @@ int panfrost_job_init(struct panfrost_device *pfdev)
        }
 
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
+               mutex_init(&js->queue[j].lock);
+
                js->queue[j].fence_context = dma_fence_context_alloc(1);
 
                ret = drm_sched_init(&js->queue[j].sched,
@@ -545,8 +586,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
 
        job_write(pfdev, JOB_INT_MASK, 0);
 
-       for (j = 0; j < NUM_JOB_SLOTS; j++)
+       for (j = 0; j < NUM_JOB_SLOTS; j++) {
                drm_sched_fini(&js->queue[j].sched);
+               mutex_destroy(&js->queue[j].lock);
+       }
 
 }
 
@@ -570,6 +613,8 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
 {
+       struct panfrost_device *pfdev = panfrost_priv->pfdev;
+       struct panfrost_job_slot *js = pfdev->js;
        int i;
 
        for (i = 0; i < NUM_JOB_SLOTS; i++)
index 776448c527ea9c8cb650e858201315237a6389b9..be8d68fb0e11e214cd38562fa1adab8b8863cf7d 100644 (file)
@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
                .pgsize_bitmap  = SZ_4K | SZ_2M,
                .ias            = FIELD_GET(0xff, pfdev->features.mmu_features),
                .oas            = FIELD_GET(0xff00, pfdev->features.mmu_features),
+               .coherent_walk  = pfdev->coherent,
                .tlb            = &mmu_tlb_ops,
                .iommu_dev      = pfdev->dev,
        };
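
The new coherent_walk flag tells io-pgtable that the GPU's page-table walker is cache-coherent with the CPU, letting it skip explicit cache maintenance when PTEs are written; pfdev->coherent is presumably derived from the device's DMA coherency.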
index 46b0d1c4a16c65547092e7c60a49d3550f2cce5c..ecef8a2383d2cc2fc97e6f43cef6ecf3a8196097 100644 (file)
@@ -224,15 +224,12 @@ static struct drm_driver pl111_drm_driver = {
        .major = 1,
        .minor = 0,
        .patchlevel = 0,
+       .gem_create_object = drm_gem_cma_create_object_default_funcs,
        .dumb_create = drm_gem_cma_dumb_create,
-       .gem_free_object_unlocked = drm_gem_cma_free_object,
-       .gem_vm_ops = &drm_gem_cma_vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = pl111_gem_import_sg_table,
-       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
        .gem_prime_mmap = drm_gem_cma_prime_mmap,
-       .gem_prime_vmap = drm_gem_cma_prime_vmap,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = pl111_debugfs_init,
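
pl111 switches from driver-wide GEM callbacks to per-object drm_gem_object_funcs: drm_gem_cma_create_object_default_funcs() allocates the CMA object with the helper's default funcs table already attached, so the removed hooks survive as object-level defaults. Roughly what that table provides, as a sketch from the CMA helpers of this era (exact membership may differ):

static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_free_object,	/* was .gem_free_object_unlocked */
	.print_info = drm_gem_cma_print_info,
	.get_sg_table = drm_gem_cma_prime_get_sg_table, /* was .gem_prime_get_sg_table */
	.vmap = drm_gem_cma_prime_vmap,		/* was .gem_prime_vmap */
	.vm_ops = &drm_gem_cma_vm_ops,		/* was .gem_vm_ops */
};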
index 524d35b648d885045eecfb25bec0bbb7f8a5b569..183d15e2cf588591106783e0fec3ea8473eb11ec 100644 (file)
@@ -67,7 +67,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 
                seq_printf(m, "size %ld, pc %d, num releases %d\n",
                           (unsigned long)bo->tbo.base.size,
-                          bo->pin_count, rel);
+                          bo->tbo.pin_count, rel);
        }
        return 0;
 }
index 6063f3a153290d4976e17259655d9cee556c3182..45fd76e04bdce62b5e6d3df64ee67fb0058cfb1a 100644 (file)
@@ -444,13 +444,13 @@ static const struct drm_framebuffer_funcs qxl_fb_funcs = {
 };
 
 static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
        qxl_crtc_update_monitors_config(crtc, "enable");
 }
 
 static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        qxl_crtc_update_monitors_config(crtc, "disable");
 }
@@ -768,7 +768,6 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
        struct drm_gem_object *obj;
        struct qxl_bo *user_bo;
        struct qxl_surface surf;
-       int ret;
 
        if (!new_state->fb)
                return 0;
@@ -804,11 +803,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
                }
        }
 
-       ret = qxl_bo_pin(user_bo);
-       if (ret)
-               return ret;
-
-       return 0;
+       return qxl_bo_pin(user_bo);
 }
 
 static void qxl_plane_cleanup_fb(struct drm_plane *plane,
index aae90a9ee1dbcbd712544e2c8c2589a1ab3671f1..3602e8b34189717d7b0e9f1393e999a11eb2e5af 100644 (file)
@@ -80,7 +80,6 @@ struct qxl_bo {
        struct ttm_place                placements[3];
        struct ttm_placement            placement;
        struct ttm_bo_kmap_obj          kmap;
-       unsigned int pin_count;
        void                            *kptr;
        unsigned int                    map_count;
        int                             type;
index 5cea6eea72abb2ff947faf9b359962969baf5c11..0bab9ec6adc15542d0d20e49498f15453daba6f8 100644 (file)
@@ -326,8 +326,8 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!qobj->pin_count) {
-               qxl_ttm_placement_from_domain(qobj, qobj->type, false);
+       if (!qobj->tbo.pin_count) {
+               qxl_ttm_placement_from_domain(qobj, qobj->type);
                ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
                if (unlikely(ret))
                        goto out;
index 2bc364412e8b8f5574d19c36a3898316b3f4cf35..547d46c14d56d54084e55898b40ea88fde20b6ed 100644 (file)
@@ -51,14 +51,12 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
        return false;
 }
 
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
 {
        u32 c = 0;
        u32 pflag = 0;
        unsigned int i;
 
-       if (pinned)
-               pflag |= TTM_PL_FLAG_NO_EVICT;
        if (qbo->tbo.base.size <= PAGE_SIZE)
                pflag |= TTM_PL_FLAG_TOPDOWN;
 
@@ -66,21 +64,21 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM) {
                qbo->placements[c].mem_type = TTM_PL_VRAM;
-               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+               qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_SURFACE) {
                qbo->placements[c].mem_type = TTM_PL_PRIV;
-               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+               qbo->placements[c++].flags = pflag;
                qbo->placements[c].mem_type = TTM_PL_VRAM;
-               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+               qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_CPU) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
-               qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+               qbo->placements[c++].flags = pflag;
        }
        if (!c) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
-               qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+               qbo->placements[c++].flags = 0;
        }
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
@@ -108,6 +106,7 @@ int qxl_bo_create(struct qxl_device *qdev,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
 {
+       struct ttm_operation_ctx ctx = { !kernel, false };
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;
@@ -128,18 +127,17 @@ int qxl_bo_create(struct qxl_device *qdev,
        }
        bo->tbo.base.funcs = &qxl_object_funcs;
        bo->type = domain;
-       bo->pin_count = pinned ? 1 : 0;
        bo->surface_id = 0;
        INIT_LIST_HEAD(&bo->list);
 
        if (surf)
                bo->surf = *surf;
 
-       qxl_ttm_placement_from_domain(bo, domain, pinned);
+       qxl_ttm_placement_from_domain(bo, domain);
 
-       r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, 0, !kernel, size,
-                       NULL, NULL, &qxl_ttm_bo_destroy);
+       r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+                                &bo->placement, 0, &ctx, size,
+                                NULL, NULL, &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->ddev.dev,
@@ -147,6 +145,9 @@ int qxl_bo_create(struct qxl_device *qdev,
                                size, domain);
                return r;
        }
+       if (pinned)
+               ttm_bo_pin(&bo->tbo);
+       ttm_bo_unreserve(&bo->tbo);
        *bo_ptr = bo;
        return 0;
 }
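
Pinning at creation is now explicit: ttm_bo_init_reserved() returns with the BO still reserved, so the pin reference can be taken with ttm_bo_pin() before ttm_bo_unreserve(), replacing the old trick of baking TTM_PL_FLAG_NO_EVICT into the initial placement.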
@@ -248,39 +249,22 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
        struct drm_device *ddev = bo->tbo.base.dev;
        int r;
 
-       if (bo->pin_count) {
-               bo->pin_count++;
+       if (bo->tbo.pin_count) {
+               ttm_bo_pin(&bo->tbo);
                return 0;
        }
-       qxl_ttm_placement_from_domain(bo, bo->type, true);
+       qxl_ttm_placement_from_domain(bo, bo->type);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (likely(r == 0)) {
-               bo->pin_count = 1;
-       }
+       if (likely(r == 0))
+               ttm_bo_pin(&bo->tbo);
        if (unlikely(r != 0))
                dev_err(ddev->dev, "%p pin failed\n", bo);
        return r;
 }
 
-static int __qxl_bo_unpin(struct qxl_bo *bo)
+static void __qxl_bo_unpin(struct qxl_bo *bo)
 {
-       struct ttm_operation_ctx ctx = { false, false };
-       struct drm_device *ddev = bo->tbo.base.dev;
-       int r, i;
-
-       if (!bo->pin_count) {
-               dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
-               return 0;
-       }
-       bo->pin_count--;
-       if (bo->pin_count)
-               return 0;
-       for (i = 0; i < bo->placement.num_placement; i++)
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r != 0))
-               dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
-       return r;
+       ttm_bo_unpin(&bo->tbo);
 }
 
 /*
@@ -314,9 +298,9 @@ int qxl_bo_unpin(struct qxl_bo *bo)
        if (r)
                return r;
 
-       r = __qxl_bo_unpin(bo);
+       __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
-       return r;
+       return 0;
 }
 
 void qxl_bo_force_delete(struct qxl_device *qdev)
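
With the pin count kept in struct ttm_buffer_object, a driver pin helper reduces to validate-then-pin, as __qxl_bo_pin() above now shows. The same shape in isolation, as a minimal sketch (hypothetical helper; assumes the BO is already reserved):

static int example_bo_pin(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r;

	/* Already pinned: just take another reference on the count. */
	if (bo->pin_count) {
		ttm_bo_pin(bo);
		return 0;
	}

	/* Move the BO where it must stay, then pin it there. */
	r = ttm_bo_validate(bo, placement, &ctx);
	if (!r)
		ttm_bo_pin(bo);
	return r;
}

Unpinning is the mirror image: ttm_bo_unpin() simply drops the count, so the old validate-without-NO_EVICT dance and its error path go away entirely.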
@@ -367,10 +351,16 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+       struct ttm_resource_manager *man;
+
+       man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
+       return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
 {
-       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+       struct ttm_resource_manager *man;
+
+       man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
+       return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
 }
index 6b434e5ef795a910d68bd6d7bfa2af48510d5b0a..09a5c818324dbb1328b3601af153cff894bceb72 100644 (file)
@@ -58,29 +58,6 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
        return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
-static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
-                             bool no_wait)
-{
-       int r;
-
-       r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-       if (unlikely(r != 0)) {
-               if (r != -ERESTARTSYS) {
-                       struct drm_device *ddev = bo->tbo.base.dev;
-
-                       dev_err(ddev->dev, "%p reserve failed for wait\n",
-                               bo);
-               }
-               return r;
-       }
-       if (mem_type)
-               *mem_type = bo->tbo.mem.mem_type;
-
-       r = ttm_bo_wait(&bo->tbo, true, no_wait);
-       ttm_bo_unreserve(&bo->tbo);
-       return r;
-}
-
 extern int qxl_bo_create(struct qxl_device *qdev,
                         unsigned long size,
                         bool kernel, bool pinned, u32 domain,
@@ -94,7 +71,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
 extern void qxl_bo_unref(struct qxl_bo **bo);
 extern int qxl_bo_pin(struct qxl_bo *bo);
 extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
 extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
 
 #endif
index 4fae3e393da14994723a458e78d6d9d4510e5211..e75e364655b81ce8203b95ff45b662f067752f6a 100644 (file)
@@ -231,8 +231,8 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
        struct ttm_operation_ctx ctx = { true, false };
        int ret;
 
-       if (!bo->pin_count) {
-               qxl_ttm_placement_from_domain(bo, bo->type, false);
+       if (!bo->tbo.pin_count) {
+               qxl_ttm_placement_from_domain(bo, bo->type);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (ret)
                        return ret;
index fd691fff8394b847cd10d0862c7380c52cc5d7d4..a80d596341437d3e551a9d2fb4657dc59c207bae 100644 (file)
@@ -32,7 +32,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "qxl_drv.h"
@@ -56,7 +55,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
 
        if (!qxl_ttm_bo_is_qxl_bo(bo)) {
@@ -67,7 +66,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
                return;
        }
        qbo = to_qxl_bo(bo);
-       qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
+       qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
        *placement = qbo->placement;
 }
 
@@ -83,11 +82,13 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
        case TTM_PL_VRAM:
                mem->bus.is_iomem = true;
                mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
+               mem->bus.caching = ttm_cached;
                break;
        case TTM_PL_PRIV:
                mem->bus.is_iomem = true;
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        qdev->surfaceram_base;
+               mem->bus.caching = ttm_cached;
                break;
        default:
                return -EINVAL;
@@ -98,59 +99,43 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
 /*
  * TTM backend functions.
  */
-struct qxl_ttm_tt {
-       struct ttm_tt                   ttm;
-       struct qxl_device               *qdev;
-       u64                             offset;
-};
-
-static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev,
-                               struct ttm_tt *ttm,
-                               struct ttm_resource *bo_mem)
-{
-       struct qxl_ttm_tt *gtt = (void *)ttm;
-
-       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
-       if (!ttm->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-                    ttm->num_pages, bo_mem, ttm);
-       }
-       /* Not implemented */
-       return -1;
-}
-
-static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev,
-                                  struct ttm_tt *ttm)
-{
-       /* Not implemented */
-}
-
 static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
                                    struct ttm_tt *ttm)
 {
-       struct qxl_ttm_tt *gtt = (void *)ttm;
-
        ttm_tt_destroy_common(bdev, ttm);
-       ttm_tt_fini(&gtt->ttm);
-       kfree(gtt);
+       ttm_tt_fini(ttm);
+       kfree(ttm);
 }
 
 static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
 {
-       struct qxl_device *qdev;
-       struct qxl_ttm_tt *gtt;
+       struct ttm_tt *ttm;
 
-       qdev = qxl_get_qdev(bo->bdev);
-       gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
-       if (gtt == NULL)
+       ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+       if (ttm == NULL)
                return NULL;
-       gtt->qdev = qdev;
-       if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
-               kfree(gtt);
+       if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
+               kfree(ttm);
                return NULL;
        }
-       return &gtt->ttm;
+       return ttm;
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+                              bool evict,
+                              struct ttm_resource *new_mem)
+{
+       struct qxl_bo *qbo;
+       struct qxl_device *qdev;
+
+       if (!qxl_ttm_bo_is_qxl_bo(bo))
+               return;
+       qbo = to_qxl_bo(bo);
+       qdev = to_qxl(qbo->tbo.base.dev);
+
+       if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+               qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
 static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
@@ -160,43 +145,39 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
        struct ttm_resource *old_mem = &bo->mem;
        int ret;
 
-       ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+       qxl_bo_move_notify(bo, evict, new_mem);
+
+       ret = ttm_bo_wait_ctx(bo, ctx);
        if (ret)
-               return ret;
+               goto out;
 
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
-       return ttm_bo_move_memcpy(bo, ctx, new_mem);
+       ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+out:
+       if (ret) {
+               swap(*new_mem, bo->mem);
+               qxl_bo_move_notify(bo, false, new_mem);
+               swap(*new_mem, bo->mem);
+       }
+       return ret;
 }
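
The failure path's double swap is what makes the early qxl_bo_move_notify() call safe: on error, new_mem temporarily takes the value of bo->mem so the notify sees the BO "moving back" to its original placement, undoing the surface-eviction bookkeeping done optimistically at the top, and the second swap restores the caller's view of new_mem.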
 
-static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
-                              bool evict,
-                              struct ttm_resource *new_mem)
+static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 {
-       struct qxl_bo *qbo;
-       struct qxl_device *qdev;
-
-       if (!qxl_ttm_bo_is_qxl_bo(bo))
-               return;
-       qbo = to_qxl_bo(bo);
-       qdev = to_qxl(qbo->tbo.base.dev);
-
-       if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
-               qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+       qxl_bo_move_notify(bo, false, NULL);
 }
 
 static struct ttm_bo_driver qxl_bo_driver = {
        .ttm_tt_create = &qxl_ttm_tt_create,
-       .ttm_tt_bind = &qxl_ttm_backend_bind,
        .ttm_tt_destroy = &qxl_ttm_backend_destroy,
-       .ttm_tt_unbind = &qxl_ttm_backend_unbind,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &qxl_evict_flags,
        .move = &qxl_bo_move,
        .io_mem_reserve = &qxl_ttm_io_mem_reserve,
-       .move_notify = &qxl_bo_move_notify,
+       .delete_mem_notify = &qxl_bo_delete_mem_notify,
 };
 
 static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
@@ -212,11 +193,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
        int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
        /* No other users of the address space, so set it to 0 */
-       r = ttm_bo_device_init(&qdev->mman.bdev,
-                              &qxl_bo_driver,
+       r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
                               qdev->ddev.anon_inode->i_mapping,
                               qdev->ddev.vma_offset_manager,
-                              false);
+                              false, false);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
index a6d8de01194ae74722f0bee3b8bad0723d3fcae8..5d54bccebd4d643d38e3eadb1afcb4c1cd3fe042 100644 (file)
@@ -497,7 +497,6 @@ struct radeon_bo {
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
        u32                             flags;
-       unsigned                        pin_count;
        void                            *kptr;
        u32                             tiling_flags;
        u32                             pitch;
index e0ae911ef427dd028ad3c606dae0f1d61d1c7b7a..b79686cf8bdbd1cb30ba7964d984e15b5cc7675a 100644 (file)
@@ -273,10 +273,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
        /* unpin of the old buffer */
        r = radeon_bo_reserve(work->old_rbo, false);
        if (likely(r == 0)) {
-               r = radeon_bo_unpin(work->old_rbo);
-               if (unlikely(r != 0)) {
-                       DRM_ERROR("failed to unpin buffer after flip\n");
-               }
+               radeon_bo_unpin(work->old_rbo);
                radeon_bo_unreserve(work->old_rbo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
@@ -607,9 +604,7 @@ pflip_cleanup:
                DRM_ERROR("failed to reserve new rbo in error path\n");
                goto cleanup;
        }
-       if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
-               DRM_ERROR("failed to unpin new rbo in error path\n");
-       }
+       radeon_bo_unpin(new_rbo);
        radeon_bo_unreserve(new_rbo);
 
 cleanup:
index 4cd30613fa1dd2ca5115b841a928cd57ced9eb88..65061c949aeea6dbd0d9dbfd996f208269e376ba 100644 (file)
@@ -124,13 +124,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
-void radeon_gem_object_free(struct drm_gem_object *obj);
-int radeon_gem_object_open(struct drm_gem_object *obj,
-                               struct drm_file *file_priv);
-void radeon_gem_object_close(struct drm_gem_object *obj,
-                               struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
-                                       int flags);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
                                      unsigned int flags, int *vpos, int *hpos,
                                      ktime_t *stime, ktime_t *etime,
@@ -145,14 +138,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
-struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
                                                        struct dma_buf_attachment *,
                                                        struct sg_table *sg);
-int radeon_gem_prime_pin(struct drm_gem_object *obj);
-void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
-void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
@@ -550,7 +538,7 @@ long radeon_drm_ioctl(struct file *filp,
        }
 
        ret = drm_ioctl(filp, cmd, arg);
-       
+
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
        return ret;
@@ -609,22 +597,13 @@ static struct drm_driver kms_driver = {
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
        .irq_handler = radeon_driver_irq_handler_kms,
        .ioctls = radeon_ioctls_kms,
-       .gem_free_object_unlocked = radeon_gem_object_free,
-       .gem_open_object = radeon_gem_object_open,
-       .gem_close_object = radeon_gem_object_close,
        .dumb_create = radeon_mode_dumb_create,
        .dumb_map_offset = radeon_mode_dumb_mmap,
        .fops = &radeon_driver_kms_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = radeon_gem_prime_export,
-       .gem_prime_pin = radeon_gem_prime_pin,
-       .gem_prime_unpin = radeon_gem_prime_unpin,
-       .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
-       .gem_prime_vmap = radeon_gem_prime_vmap,
-       .gem_prime_vunmap = radeon_gem_prime_vunmap,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
index e5c4271e64edec66ff8a4d79e69fe113e3526b10..0ccd7213e41ffb8789c0f12ca942aac608596e15 100644 (file)
 
 #include "radeon.h"
 
-void radeon_gem_object_free(struct drm_gem_object *gobj)
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
+                                       int flags);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+static const struct drm_gem_object_funcs radeon_gem_object_funcs;
+
+static void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
@@ -85,6 +95,7 @@ retry:
                return r;
        }
        *obj = &robj->tbo.base;
+       (*obj)->funcs = &radeon_gem_object_funcs;
        robj->pid = task_pid_nr(current);
 
        mutex_lock(&rdev->gem.mutex);
@@ -146,7 +157,7 @@ void radeon_gem_fini(struct radeon_device *rdev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
@@ -176,8 +187,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
        return 0;
 }
 
-void radeon_gem_object_close(struct drm_gem_object *obj,
-                            struct drm_file *file_priv)
+static void radeon_gem_object_close(struct drm_gem_object *obj,
+                                   struct drm_file *file_priv)
 {
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
@@ -216,6 +227,18 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
        return r;
 }
 
+static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+       .free = radeon_gem_object_free,
+       .open = radeon_gem_object_open,
+       .close = radeon_gem_object_close,
+       .export = radeon_gem_prime_export,
+       .pin = radeon_gem_prime_pin,
+       .unpin = radeon_gem_prime_unpin,
+       .get_sg_table = radeon_gem_prime_get_sg_table,
+       .vmap = radeon_gem_prime_vmap,
+       .vunmap = radeon_gem_prime_vunmap,
+};
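
The funcs table is attached per object via the (*obj)->funcs assignment added earlier in this file, which is also why the prime helpers gain local forward declarations here: their prototypes were dropped from the header above, while the definitions presumably still live in radeon_prime.c.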
+
 /*
  * GEM ioctls.
  */
index 316e35d3f8a9dd9f7132f391de55fb1c4b6240f5..ab81e35cb060698acd6b6641c96fbe665a2f87e1 100644 (file)
@@ -113,57 +113,29 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c].mem_type = TTM_PL_VRAM;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-                                                    TTM_PL_FLAG_UNCACHED;
+                       rbo->placements[c++].flags = 0;
                }
 
                rbo->placements[c].fpfn = 0;
                rbo->placements[c].mem_type = TTM_PL_VRAM;
-               rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-                                            TTM_PL_FLAG_UNCACHED;
+               rbo->placements[c++].flags = 0;
        }
 
        if (domain & RADEON_GEM_DOMAIN_GTT) {
-               if (rbo->flags & RADEON_GEM_GTT_UC) {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_TT;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
-               } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-                          (rbo->rdev->flags & RADEON_IS_AGP)) {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_TT;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-                               TTM_PL_FLAG_UNCACHED;
-               } else {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_TT;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
-               }
+               rbo->placements[c].fpfn = 0;
+               rbo->placements[c].mem_type = TTM_PL_TT;
+               rbo->placements[c++].flags = 0;
        }
 
        if (domain & RADEON_GEM_DOMAIN_CPU) {
-               if (rbo->flags & RADEON_GEM_GTT_UC) {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
-               } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-                   rbo->rdev->flags & RADEON_IS_AGP) {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-                               TTM_PL_FLAG_UNCACHED;
-               } else {
-                       rbo->placements[c].fpfn = 0;
-                       rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-                       rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
-               }
+               rbo->placements[c].fpfn = 0;
+               rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+               rbo->placements[c++].flags = 0;
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-               rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+               rbo->placements[c++].flags = 0;
        }
 
        rbo->placement.num_placement = c;
@@ -334,8 +306,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
        if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
                return -EPERM;
 
-       if (bo->pin_count) {
-               bo->pin_count++;
+       if (bo->tbo.pin_count) {
+               ttm_bo_pin(&bo->tbo);
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
 
@@ -367,13 +339,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
-               bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
-               bo->pin_count = 1;
+               ttm_bo_pin(&bo->tbo);
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
@@ -391,36 +361,22 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
 }
 
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
 {
-       struct ttm_operation_ctx ctx = { false, false };
-       int r, i;
-
-       if (!bo->pin_count) {
-               dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
-               return 0;
-       }
-       bo->pin_count--;
-       if (bo->pin_count)
-               return 0;
-       for (i = 0; i < bo->placement.num_placement; i++) {
-               bo->placements[i].lpfn = 0;
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (likely(r == 0)) {
+       ttm_bo_unpin(&bo->tbo);
+       if (!bo->tbo.pin_count) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
-       } else {
-               dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
-       return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
+       struct ttm_bo_device *bdev = &rdev->mman.bdev;
+       struct ttm_resource_manager *man;
+
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
 #ifndef CONFIG_HIBERNATION
        if (rdev->flags & RADEON_IS_IGP) {
@@ -429,7 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
                        return 0;
        }
 #endif
-       return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+       man = ttm_manager_type(bdev, TTM_PL_VRAM);
+       return ttm_resource_manager_evict_all(bdev, man);
 }
 
 void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -549,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 
        list_for_each_entry(lobj, head, tv.head) {
                struct radeon_bo *bo = lobj->robj;
-               if (!bo->pin_count) {
+               if (!bo->tbo.pin_count) {
                        u32 domain = lobj->preferred_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
@@ -629,7 +586,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
                        break;
 
                old_object = reg->bo;
-               if (old_object->pin_count == 0)
+               if (old_object->tbo.pin_count == 0)
                        steal = i;
        }
 
@@ -794,7 +751,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct ttm_operation_ctx ctx = { false, false };
        struct radeon_device *rdev;
@@ -816,8 +773,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
-       if (rbo->pin_count > 0)
-               return -EINVAL;
+       if (rbo->tbo.pin_count > 0)
+               return VM_FAULT_SIGBUS;
 
        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -831,34 +788,23 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        r = ttm_bo_validate(bo, &rbo->placement, &ctx);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-               return ttm_bo_validate(bo, &rbo->placement, &ctx);
-       } else if (unlikely(r != 0)) {
-               return r;
+               r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+       } else if (likely(!r)) {
+               offset = bo->mem.start << PAGE_SHIFT;
+               /* this should never happen */
+               if ((offset + size) > rdev->mc.visible_vram_size)
+                       return VM_FAULT_SIGBUS;
        }
 
-       offset = bo->mem.start << PAGE_SHIFT;
-       /* this should never happen */
-       if ((offset + size) > rdev->mc.visible_vram_size)
-               return -EINVAL;
+       if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+               return VM_FAULT_NOPAGE;
+       else if (unlikely(r))
+               return VM_FAULT_SIGBUS;
 
+       ttm_bo_move_to_lru_tail_unlocked(bo);
        return 0;
 }
 
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
-       int r;
-
-       r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-       if (unlikely(r != 0))
-               return r;
-       if (mem_type)
-               *mem_type = bo->tbo.mem.mem_type;
-
-       r = ttm_bo_wait(&bo->tbo, true, no_wait);
-       ttm_bo_unreserve(&bo->tbo);
-       return r;
-}
-
 /**
  * radeon_bo_fence - add fence to buffer object
  *
index 44b47241ee42b027dccb7edacf8c424931382d88..d606e9a935e3346d546d109167b2d50ce86041c1 100644 (file)
@@ -133,9 +133,6 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
        return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
-extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
-                         bool no_wait);
-
 extern int radeon_bo_create(struct radeon_device *rdev,
                            unsigned long size, int byte_align,
                            bool kernel, u32 domain, u32 flags,
@@ -149,7 +146,7 @@ extern void radeon_bo_unref(struct radeon_bo **bo);
 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
                                    u64 max_offset, u64 *gpu_addr);
-extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern void radeon_bo_unpin(struct radeon_bo *bo);
 extern int radeon_bo_evict_vram(struct radeon_device *rdev);
 extern void radeon_bo_force_delete(struct radeon_device *rdev);
 extern int radeon_bo_init(struct radeon_device *rdev);
@@ -166,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                  bool evict,
                                  struct ttm_resource *new_mem);
-extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                            bool shared);
index 36150b7f31a90aa1eece8a9f172b2b7897ab3193..95038ac3382e2187e61fdeae092264eb23c78599 100644 (file)
@@ -47,7 +47,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "radeon_reg.h"
@@ -59,6 +58,8 @@ static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
 static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
                              struct ttm_tt *ttm,
                              struct ttm_resource *bo_mem);
+static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+                                struct ttm_tt *ttm);
 
 struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 {
@@ -89,7 +90,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
 
        struct radeon_bo *rbo;
@@ -151,7 +152,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-                       bool evict, bool no_wait_gpu,
+                       bool evict,
                        struct ttm_resource *new_mem,
                        struct ttm_resource *old_mem)
 {
@@ -206,11 +207,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible,
-                               bool no_wait_gpu,
+                               bool evict,
+                               struct ttm_operation_ctx *ctx,
                                struct ttm_resource *new_mem)
 {
-       struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource tmp_mem;
        struct ttm_place placements;
@@ -226,18 +226,13 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+       placements.flags = 0;
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                return r;
        }
 
-       r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
-       if (unlikely(r)) {
-               goto out_cleanup;
-       }
-
-       r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+       r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -246,22 +241,27 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, &ctx, new_mem);
+       r = ttm_bo_wait_ctx(bo, ctx);
+       if (unlikely(r))
+               goto out_cleanup;
+
+       radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+       ttm_resource_free(bo, &bo->mem);
+       ttm_bo_assign_mem(bo, new_mem);
 out_cleanup:
        ttm_resource_free(bo, &tmp_mem);
        return r;
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible,
-                               bool no_wait_gpu,
+                               bool evict,
+                               struct ttm_operation_ctx *ctx,
                                struct ttm_resource *new_mem)
 {
-       struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource tmp_mem;
        struct ttm_placement placement;
@@ -277,16 +277,22 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+       placements.flags = 0;
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
-       if (unlikely(r)) {
+
+       r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+       if (unlikely(r))
                goto out_cleanup;
-       }
-       r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+
+       r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
+       if (unlikely(r))
+               goto out_cleanup;
+
+       ttm_bo_assign_mem(bo, &tmp_mem);
+       r = radeon_move_blit(bo, true, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -304,13 +310,20 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
        struct ttm_resource *old_mem = &bo->mem;
        int r;
 
-       r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+       if (new_mem->mem_type == TTM_PL_TT) {
+               r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
+               if (r)
+                       return r;
+       }
+       radeon_bo_move_notify(bo, evict, new_mem);
+
+       r = ttm_bo_wait_ctx(bo, ctx);
        if (r)
-               return r;
+               goto fail;
 
        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
-       if (WARN_ON_ONCE(rbo->pin_count > 0))
+       if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
                return -EINVAL;
 
        rdev = radeon_get_rdev(bo->bdev);
@@ -318,14 +331,19 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
-       if ((old_mem->mem_type == TTM_PL_TT &&
-            new_mem->mem_type == TTM_PL_SYSTEM) ||
-           (old_mem->mem_type == TTM_PL_SYSTEM &&
-            new_mem->mem_type == TTM_PL_TT)) {
-               /* bind is enough */
+       if (old_mem->mem_type == TTM_PL_SYSTEM &&
+           new_mem->mem_type == TTM_PL_TT) {
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
+
+       if (old_mem->mem_type == TTM_PL_TT &&
+           new_mem->mem_type == TTM_PL_SYSTEM) {
+               radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_mem);
+               return 0;
+       }
        if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
            rdev->asic->copy.copy == NULL) {
                /* use memcpy */
@@ -334,14 +352,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
-               r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
-                                       ctx->no_wait_gpu, new_mem);
+               r = radeon_move_vram_ram(bo, evict, ctx, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
-               r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
-                                           ctx->no_wait_gpu, new_mem);
+               r = radeon_move_ram_vram(bo, evict, ctx, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+               r = radeon_move_blit(bo, evict,
                                     new_mem, old_mem);
        }
 
@@ -349,13 +365,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 memcpy:
                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (r) {
-                       return r;
+                       goto fail;
                }
        }
 
        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
        return 0;
+fail:
+       swap(*new_mem, bo->mem);
+       radeon_bo_move_notify(bo, false, new_mem);
+       swap(*new_mem, bo->mem);
+       return r;
 }
 
 static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
@@ -374,6 +395,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
                        mem->bus.offset = (mem->start << PAGE_SHIFT) +
                                rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+                       mem->bus.caching = ttm_write_combined;
                }
 #endif
                break;
@@ -384,17 +406,13 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
                        return -EINVAL;
                mem->bus.offset += rdev->mc.aper_base;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_write_combined;
 #ifdef __alpha__
                /*
                 * Alpha: use bus.addr to hold the ioremap() return,
                 * so we can modify bus.base below.
                 */
-               if (mem->placement & TTM_PL_FLAG_WC)
-                       mem->bus.addr =
-                               ioremap_wc(mem->bus.offset, bus_size);
-               else
-                       mem->bus.addr =
-                               ioremap(mem->bus.offset, bus_size);
+               mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
                if (!mem->bus.addr)
                        return -ENOMEM;
 
@@ -418,7 +436,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-       struct ttm_dma_tt               ttm;
+       struct ttm_tt           ttm;
        u64                             offset;
 
        uint64_t                        userptr;
@@ -545,15 +563,15 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
 
        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+               WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
-       if (ttm->caching_state == tt_cached)
+       if (ttm->caching == ttm_cached)
                flags |= RADEON_GART_PAGE_SNOOP;
        r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
                             ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+               DRM_ERROR("failed to bind %u pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
                return r;
        }
@@ -583,7 +601,7 @@ static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt
        radeon_ttm_backend_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
 
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
 
@@ -592,6 +610,10 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 {
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt;
+       enum ttm_caching caching;
+       struct radeon_bo *rbo;
+
+       rbo = container_of(bo, struct radeon_bo, tbo);
 
        rdev = radeon_get_rdev(bo->bdev);
 #if IS_ENABLED(CONFIG_AGP)
@@ -605,11 +627,19 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
        if (gtt == NULL) {
                return NULL;
        }
-       if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+
+       if (rbo->flags & RADEON_GEM_GTT_UC)
+               caching = ttm_uncached;
+       else if (rbo->flags & RADEON_GEM_GTT_WC)
+               caching = ttm_write_combined;
+       else
+               caching = ttm_cached;
+
+       if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
 }
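
Caching is now a property of the ttm_tt itself (enum ttm_caching), chosen once at create time from the BO's RADEON_GEM_GTT_UC/WC flags. That is what allowed the TTM_PL_FLAG_UNCACHED/WC/CACHED branches to be stripped from radeon_ttm_placement_from_domain() earlier in this diff, leaving the placement flags at 0.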
 
 static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
@@ -622,7 +652,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
 
        if (!ttm)
                return NULL;
-       return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+       return container_of(ttm, struct radeon_ttm_tt, ttm);
 }
 
 static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
@@ -639,30 +669,16 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
                        return -ENOMEM;
 
                ttm->page_flags |= TTM_PAGE_FLAG_SG;
-               ttm_tt_set_populated(ttm);
                return 0;
        }
 
        if (slave && ttm->sg) {
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address, ttm->num_pages);
-               ttm_tt_set_populated(ttm);
                return 0;
        }
 
-#if IS_ENABLED(CONFIG_AGP)
-       if (rdev->flags & RADEON_IS_AGP) {
-               return ttm_pool_populate(ttm, ctx);
-       }
-#endif
-
-#ifdef CONFIG_SWIOTLB
-       if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
-               return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
-       }
-#endif
-
-       return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
+       return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
 }
 
 static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
@@ -680,21 +696,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
        if (slave)
                return;
 
-#if IS_ENABLED(CONFIG_AGP)
-       if (rdev->flags & RADEON_IS_AGP) {
-               ttm_pool_unpopulate(ttm);
-               return;
-       }
-#endif
-
-#ifdef CONFIG_SWIOTLB
-       if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
-               ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
-               return;
-       }
-#endif
-
-       ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
+       return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
 }
 
 int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
@@ -793,19 +795,22 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
        return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
 }
 
+static void
+radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+       radeon_bo_move_notify(bo, false, NULL);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
-       .ttm_tt_bind = &radeon_ttm_tt_bind,
-       .ttm_tt_unbind = &radeon_ttm_tt_unbind,
        .ttm_tt_destroy = &radeon_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
-       .move_notify = &radeon_bo_move_notify,
-       .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+       .delete_mem_notify = &radeon_bo_delete_mem_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
 };
 
@@ -814,10 +819,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
        int r;
 
        /* No other users of the address space, so set it to 0 */
-       r = ttm_bo_device_init(&rdev->mman.bdev,
-                              &radeon_bo_driver,
+       r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
                               rdev->ddev->anon_inode->i_mapping,
                               rdev->ddev->vma_offset_manager,
+                              rdev->need_swiotlb,
                               dma_addressing_limited(&rdev->pdev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -825,6 +830,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
        }
        rdev->mman.initialized = true;
 
+       ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
+                     dma_addressing_limited(&rdev->pdev->dev));
+
        r = radeon_ttm_init_vram(rdev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
@@ -906,17 +914,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 
 static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
-       struct ttm_buffer_object *bo;
-       struct radeon_device *rdev;
+       struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+       struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
        vm_fault_t ret;
 
-       bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
-       if (bo == NULL)
-               return VM_FAULT_NOPAGE;
-
-       rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
-       ret = ttm_bo_vm_fault(vmf);
+
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               goto unlock_mclk;
+
+       ret = radeon_bo_fault_reserve_notify(bo);
+       if (ret)
+               goto unlock_resv;
+
+       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                      TTM_BO_VM_NUM_PREFAULT, 1);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               goto unlock_mclk;
+
+unlock_resv:
+       dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
        up_read(&rdev->pm.mclk_lock);
        return ret;
 }
@@ -960,6 +980,14 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
        return 0;
 }
 
+static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
+}
 
 static int ttm_pl_vram = TTM_PL_VRAM;
 static int ttm_pl_tt = TTM_PL_TT;
@@ -967,10 +995,7 @@ static int ttm_pl_tt = TTM_PL_TT;
 static struct drm_info_list radeon_ttm_debugfs_list[] = {
        {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
        {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
-       {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
-       {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+       {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
 };
 
 static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
@@ -1098,11 +1123,6 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 
        count = ARRAY_SIZE(radeon_ttm_debugfs_list);
 
-#ifdef CONFIG_SWIOTLB
-       if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
-               --count;
-#endif
-
        return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
 #else
 
index fe86a3e67757124bd39f9a15d2737ad284216bdf..4c360a255849d93af1951c1677b75c6a00090b6f 100644 (file)
@@ -710,7 +710,7 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
        struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
@@ -748,8 +748,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
        struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
        struct rcar_du_device *rcdu = rcrtc->dev;
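
[Editor's note] This is one instance of a tree-wide conversion repeated throughout this merge: the CRTC helper callbacks atomic_enable() and atomic_disable() now receive the full struct drm_atomic_state instead of the old CRTC state, and callbacks that still need the old state look it up on demand. A sketch of the converted shape for a hypothetical driver (foo_hw_power_up() is a made-up helper):

	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
	{
		struct drm_crtc_state *old_state =
			drm_atomic_get_old_crtc_state(state, crtc);

		/* crtc->state is the new state being committed. */
		if (!old_state->active)
			foo_hw_power_up(crtc);	/* hypothetical helper */
	}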
index 0f3eb392fe39d5cc1f41d7c8a1dd074af75550ce..b7654f5e42253beb197af59e434196a2c7c32ba6 100644 (file)
@@ -212,15 +212,10 @@ static const struct file_operations rockchip_drm_driver_fops = {
 static struct drm_driver rockchip_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
        .lastclose              = drm_fb_helper_lastclose,
-       .gem_vm_ops             = &drm_gem_cma_vm_ops,
-       .gem_free_object_unlocked = rockchip_gem_free_object,
        .dumb_create            = rockchip_gem_dumb_create,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
-       .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
        .gem_prime_import_sg_table      = rockchip_gem_prime_import_sg_table,
-       .gem_prime_vmap         = rockchip_gem_prime_vmap,
-       .gem_prime_vunmap       = rockchip_gem_prime_vunmap,
        .gem_prime_mmap         = rockchip_gem_mmap_buf,
        .fops                   = &rockchip_drm_driver_fops,
        .name   = DRIVER_NAME,
index 62e5d0970525e8b6b2fefd9f86e8aec091238d8c..7d5ebb10323b864441b95401425fabd141d2bda7 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <drm/drm.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_vma_manager.h>
 
@@ -295,6 +296,14 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
        kfree(rk_obj);
 }
 
+static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
+       .free = rockchip_gem_free_object,
+       .get_sg_table = rockchip_gem_prime_get_sg_table,
+       .vmap = rockchip_gem_prime_vmap,
+       .vunmap = rockchip_gem_prime_vunmap,
+       .vm_ops = &drm_gem_cma_vm_ops,
+};
+
 static struct rockchip_gem_object *
        rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
 {
@@ -309,6 +318,8 @@ static struct rockchip_gem_object *
 
        obj = &rk_obj->base;
 
+       obj->funcs = &rockchip_gem_object_funcs;
+
        drm_gem_object_init(drm, obj, size);
 
        return rk_obj;
@@ -337,7 +348,7 @@ err_free_rk_obj:
 }
 
 /*
- * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+ * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
  * callback function
  */
 void rockchip_gem_free_object(struct drm_gem_object *obj)
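
[Editor's note] The pattern here recurs in several drivers in this merge: per-object struct drm_gem_object_funcs replaces the GEM callbacks that used to live in struct drm_driver, and obj->funcs must be assigned before drm_gem_object_init(). A condensed sketch for a hypothetical CMA-backed driver (names other than the drm_* ones are made up):

	static const struct drm_gem_object_funcs foo_gem_object_funcs = {
		.free		= foo_gem_free_object,
		.get_sg_table	= foo_gem_prime_get_sg_table,
		.vmap		= foo_gem_prime_vmap,
		.vunmap		= foo_gem_prime_vunmap,
		.vm_ops		= &drm_gem_cma_vm_ops,
	};

	static struct drm_gem_object *foo_gem_alloc(struct drm_device *drm,
						    size_t size)
	{
		struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return ERR_PTR(-ENOMEM);

		obj->funcs = &foo_gem_object_funcs;	/* before init */
		if (drm_gem_object_init(drm, obj, size)) {
			kfree(obj);
			return ERR_PTR(-ENOMEM);
		}

		return obj;
	}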
index c80f7d9fd13f81c4d92c00651f08a9c072643323..47835715b44b04a61b30c2b57dfb0dbc0c8f087c 100644 (file)
@@ -693,7 +693,7 @@ static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
 }
 
 static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct vop *vop = to_vop(crtc);
 
@@ -1260,8 +1260,10 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct vop *vop = to_vop(crtc);
        const struct vop_data *vop_data = vop->data;
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
index 1463801189624a571c22c84370d80c4e33fc8246..f8ec277a6aa852d0807ecedeffc2db388a9682bf 100644 (file)
@@ -73,6 +73,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 
        init_completion(&entity->entity_idle);
 
+       /* We start in an idle state. */
+       complete(&entity->entity_idle);
+
        spin_lock_init(&entity->rq_lock);
        spsc_queue_init(&entity->job_queue);
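
[Editor's note] Completing entity_idle right after init_completion() marks a freshly created entity as idle before it has run anything, presumably so that teardown paths waiting on entity_idle do not block for an entity that never received a job. A toy illustration of the completion semantics this relies on (not scheduler code):

	static void foo_idle_demo(void)
	{
		struct completion idle;

		init_completion(&idle);
		complete(&idle);		/* "idle" from the start */

		/* Returns immediately: the completion is already done. */
		wait_for_completion(&idle);
	}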
 
index 6f37c104c46f909f057b10df0a47dc561b868636..5726746f6d1806ea2aff37cbe2bba12ff594ddec 100644 (file)
@@ -23,7 +23,7 @@
 #include "sti_vtg.h"
 
 static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
 
@@ -35,7 +35,7 @@ static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
 
index 6e28f707092f07219bb84c09bebaabc287a600d7..e9af92d4a74b46f4e8ea109a7eca77a884567e00 100644 (file)
@@ -420,7 +420,7 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
 }
 
 static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        struct ltdc_device *ldev = crtc_to_ltdc(crtc);
        struct drm_device *ddev = crtc->dev;
@@ -442,7 +442,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct ltdc_device *ldev = crtc_to_ltdc(crtc);
        struct drm_device *ddev = crtc->dev;
index 3a153648b36980862f5712635109c54ce4ce332a..999deb64bd709ef70d053f6b090b3737641582b9 100644 (file)
@@ -101,7 +101,7 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
        struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
@@ -122,7 +122,7 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
        struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
index 424ad60b4f388c1e5f492ac26457849fa11bb7b1..52acc2f8f79887b978482576a082e515e4459d3b 100644 (file)
@@ -1742,7 +1742,7 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
 }
 
 static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct tegra_dc *dc = to_tegra_dc(crtc);
        u32 value;
@@ -1799,10 +1799,10 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        struct drm_display_mode *mode = &crtc->state->adjusted_mode;
-       struct tegra_dc_state *state = to_dc_state(crtc->state);
+       struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
        struct tegra_dc *dc = to_tegra_dc(crtc);
        u32 value;
        int err;
@@ -1882,7 +1882,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
                tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
 
        /* apply PLL and pixel clock changes */
-       tegra_dc_commit_state(dc, state);
+       tegra_dc_commit_state(dc, crtc_state);
 
        /* program display mode */
        tegra_dc_set_timings(dc, mode);
index ba9d1c3e7cacf1bedeb3e73c4f9752bda1e5b32f..f0f581cd345e6fc9d49cf7453f64816cd9f4630b 100644 (file)
@@ -858,12 +858,8 @@ static struct drm_driver tegra_drm_driver = {
        .debugfs_init = tegra_debugfs_init,
 #endif
 
-       .gem_free_object_unlocked = tegra_bo_free_object,
-       .gem_vm_ops = &tegra_bo_vm_ops,
-
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,
 
        .dumb_create = tegra_bo_dumb_create,
index a2bac20ff19ddb73852f590b8605ff0514b84e8f..26af8daa9a1681b0d1c860903fc469c17434ac4e 100644 (file)
@@ -132,24 +132,29 @@ static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+       struct dma_buf_map map;
+       int ret;
 
-       if (obj->vaddr)
+       if (obj->vaddr) {
                return obj->vaddr;
-       else if (obj->gem.import_attach)
-               return dma_buf_vmap(obj->gem.import_attach->dmabuf);
-       else
+       } else if (obj->gem.import_attach) {
+               ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+               return ret ? NULL : map.vaddr;
+       } else {
                return vmap(obj->pages, obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
+       }
 }
 
 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
 {
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
 
        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
-               dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+               dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
        else
                vunmap(addr);
 }
@@ -230,6 +235,12 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
        return 0;
 }
 
+static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
+       .free = tegra_bo_free_object,
+       .export = tegra_gem_prime_export,
+       .vm_ops = &tegra_bo_vm_ops,
+};
+
 static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
 {
@@ -240,6 +251,8 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
        if (!bo)
                return ERR_PTR(-ENOMEM);
 
+       bo->gem.funcs = &tegra_gem_object_funcs;
+
        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);
 
@@ -634,15 +647,17 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
        return __tegra_gem_mmap(gem, vma);
 }
 
-static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
 {
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
 
-       return bo->vaddr;
+       dma_buf_map_set_vaddr(map, bo->vaddr);
+
+       return 0;
 }
 
-static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
 {
 }
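
[Editor's note] These hunks track the dma-buf vmap interface change: dma_buf_vmap() now returns an error code and fills in a struct dma_buf_map, and dma_buf_vunmap() takes the same struct. The importer-side idiom, sketched for a hypothetical caller mirroring the tegra_bo_mmap()/tegra_bo_munmap() conversion above:

	static void *foo_kmap(struct dma_buf *dmabuf)
	{
		struct dma_buf_map map;

		if (dma_buf_vmap(dmabuf, &map))
			return NULL;

		return map.vaddr;	/* assumes a system-memory mapping */
	}

	static void foo_kunmap(struct dma_buf *dmabuf, void *vaddr)
	{
		struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

		dma_buf_vunmap(dmabuf, &map);
	}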
 
index 3c5744a91d4a006fc43f86f11a2729d6055c1bc1..848b9c7b553d9d8ff671e3b9e72dcf88c7d3c6f1 100644 (file)
@@ -212,8 +212,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
        struct tidss_device *tidss = to_tidss(ddev);
@@ -255,7 +257,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
index 43e72d0b2d84d7dbd97d5b5f73eba8ae20c5243e..35067ae674eaa1a5662cf5a93753d1e6e453ac02 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "tidss_crtc.h"
 #include "tidss_dispc.h"
@@ -150,6 +151,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
 }
 
 static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+       .prepare_fb = drm_gem_fb_prepare_fb,
        .atomic_check = tidss_plane_atomic_check,
        .atomic_update = tidss_plane_atomic_update,
        .atomic_disable = tidss_plane_atomic_disable,
index 518220bd092a646330544fc494ac9cfc51bd8e92..da2ab2aa35777d52faf2cba66855632ae2dbe290 100644 (file)
@@ -147,12 +147,9 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
                        LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
                        LCDC_V1_UNDERFLOW_INT_ENA);
-               tilcdc_set(dev, LCDC_DMA_CTRL_REG,
-                       LCDC_V1_END_OF_FRAME_INT_ENA);
        } else {
                tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
                        LCDC_V2_UNDERFLOW_INT_ENA |
-                       LCDC_V2_END_OF_FRAME0_INT_ENA |
                        LCDC_FRAME_DONE | LCDC_SYNC_LOST);
        }
 }
@@ -484,7 +481,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 }
 
 static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
-                                     struct drm_crtc_state *old_state)
+                                     struct drm_atomic_state *state)
 {
        tilcdc_crtc_enable(crtc);
 }
@@ -532,7 +529,7 @@ static void tilcdc_crtc_disable(struct drm_crtc *crtc)
 }
 
 static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
        tilcdc_crtc_disable(crtc);
 }
@@ -678,11 +675,44 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
 
 static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
 {
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct tilcdc_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+       tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0);
+
+       if (priv->rev == 1)
+               tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+                          LCDC_V1_END_OF_FRAME_INT_ENA);
+       else
+               tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG,
+                          LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
        return 0;
 }
 
 static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
 {
+       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct tilcdc_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+       if (priv->rev == 1)
+               tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+                            LCDC_V1_END_OF_FRAME_INT_ENA);
+       else
+               tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+                            LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 }
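
[Editor's note] The end-of-frame interrupt is now armed and disarmed on demand from the enable_vblank/disable_vblank hooks, under irq_lock, instead of being switched on unconditionally when the CRTC is enabled. Wiring this up only needs the two entries in the CRTC funcs (visible in the next hunk); the generic shape, with the foo_hw_* register helpers and to_foo_crtc() being hypothetical:

	static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
	{
		struct foo_crtc *fcrtc = to_foo_crtc(crtc);
		unsigned long flags;

		spin_lock_irqsave(&fcrtc->irq_lock, flags);
		foo_hw_clear_eof_status(crtc->dev);	/* drop stale status */
		foo_hw_enable_eof_irq(crtc->dev);
		spin_unlock_irqrestore(&fcrtc->irq_lock, flags);

		return 0;
	}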
 
 static void tilcdc_crtc_reset(struct drm_crtc *crtc)
@@ -724,20 +754,6 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
        .disable_vblank = tilcdc_crtc_disable_vblank,
 };
 
-int tilcdc_crtc_max_width(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct tilcdc_drm_private *priv = dev->dev_private;
-       int max_width = 0;
-
-       if (priv->rev == 1)
-               max_width = 1024;
-       else if (priv->rev == 2)
-               max_width = 2048;
-
-       return max_width;
-}
-
 static enum drm_mode_status
 tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
                       const struct drm_display_mode *mode)
@@ -750,7 +766,7 @@ tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
         * check to see if the width is within the range that
         * the LCD Controller physically supports
         */
-       if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+       if (mode->hdisplay > priv->max_width)
                return MODE_VIRTUAL_X;
 
        /* width must be multiple of 16 */
index 4f5fc3e87383481134efb430fe6bb6536350d486..3d7e4db756b75ecae90841e4266e04eada54e776 100644 (file)
@@ -105,7 +105,7 @@ static void modeset_init(struct drm_device *dev)
 
        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
-       dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+       dev->mode_config.max_width = priv->max_width;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &mode_config_funcs;
 }
@@ -218,22 +218,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
                goto init_failed;
        }
 
-       if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
-               priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
-
-       DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
-
-       if (of_property_read_u32(node, "max-width", &priv->max_width))
-               priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
-
-       DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
-
-       if (of_property_read_u32(node, "max-pixelclock",
-                                       &priv->max_pixelclock))
-               priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
-
-       DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
-
        pm_runtime_enable(dev);
 
        /* Determine LCD IP Version */
@@ -287,6 +271,26 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
                }
        }
 
+       if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+               priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
+
+       DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
+
+       if (of_property_read_u32(node, "max-width", &priv->max_width)) {
+               if (priv->rev == 1)
+                       priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V1;
+               else
+                       priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V2;
+       }
+
+       DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
+
+       if (of_property_read_u32(node, "max-pixelclock",
+                                &priv->max_pixelclock))
+               priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
+
+       DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
+
        ret = tilcdc_crtc_create(ddev);
        if (ret < 0) {
                dev_err(dev, "failed to create crtc\n");
@@ -428,8 +432,8 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
 }
 
 static struct drm_info_list tilcdc_debugfs_list[] = {
-               { "regs", tilcdc_regs_show, 0 },
-               { "mm",   tilcdc_mm_show,   0 },
+               { "regs", tilcdc_regs_show, 0, NULL },
+               { "mm",   tilcdc_mm_show,   0, NULL },
 };
 
 static void tilcdc_debugfs_init(struct drm_minor *minor)
index 18815e75ca4f4397c62ce70b15e03500168e2194..d29806ca8817fcf5fb9e11fdc5a301215f800767 100644 (file)
@@ -28,8 +28,10 @@ struct drm_plane;
 
 /* Defaulting to pixel clock defined on AM335x */
 #define TILCDC_DEFAULT_MAX_PIXELCLOCK  126000
-/* Defaulting to max width as defined on AM335x */
-#define TILCDC_DEFAULT_MAX_WIDTH  2048
+/* Maximum display width for LCDC V1 */
+#define TILCDC_DEFAULT_MAX_WIDTH_V1  1024
+/* ... and for LCDC V2 found on AM335x: */
+#define TILCDC_DEFAULT_MAX_WIDTH_V2  2048
 /*
  * This may need some tweaking, but want to allow at least 1280x1024@60
  * with optimized DDR & EMIF settings tweaked 1920x1080@24 appears to
@@ -158,7 +160,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
                const struct tilcdc_panel_info *info);
 void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
                                        bool simulate_vesa_sync);
-int tilcdc_crtc_max_width(struct drm_crtc *crtc);
 void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
 int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
                struct drm_framebuffer *fb,
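
[Editor's note] The single TILCDC_DEFAULT_MAX_WIDTH constant splits into per-revision defaults because LCDC V1 tops out at 1024 pixels while the V2 core on AM335x handles 2048, and the probe code above picks the fallback only after the IP revision is known. The optional-DT-property idiom used there, as a standalone sketch (the function and rev handling are hypothetical; the property name and constants come from the hunks above):

	static u32 foo_max_width(struct device_node *node, int rev)
	{
		u32 max_width;

		/* A non-zero return means the property is absent and
		 * max_width is left untouched, so fall back per revision. */
		if (of_property_read_u32(node, "max-width", &max_width))
			max_width = rev == 1 ? TILCDC_DEFAULT_MAX_WIDTH_V1 :
					       TILCDC_DEFAULT_MAX_WIDTH_V2;

		return max_width;
	}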
index 90c0da88cc98666dad96d412fe19675593c2a5f1..b6f5f87b270fcdb154ee25f337a271bddca38f17 100644 (file)
@@ -4,9 +4,8 @@
 
 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-       ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
-       ttm_resource.o
+       ttm_execbuf_util.o ttm_range_manager.o \
+       ttm_resource.o ttm_pool.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
-ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
index a98fd795b7523ad355668b21afa51a4f3ced2661..03c86628e4ac4aaade39d85d5f10dd54e0edcde5 100644 (file)
@@ -34,7 +34,6 @@
 
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_placement.h>
 #include <linux/agp_backend.h>
 #include <linux/module.h>
@@ -54,7 +53,7 @@ int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
        struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
        struct drm_mm_node *node = bo_mem->mm_node;
        struct agp_memory *mem;
-       int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+       int ret, cached = ttm->caching == ttm_cached;
        unsigned i;
 
        if (agp_be->mem)
@@ -136,7 +135,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
        agp_be->mem = NULL;
        agp_be->bridge = bridge;
 
-       if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
+       if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) {
                kfree(agp_be);
                return NULL;
        }
index eb4b7df02ca034ab55c963e7c5d00430bd302b19..c63b7ea1cd5d0db6f95486d236272660d16d3cca 100644 (file)
@@ -115,10 +115,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
 
-       if (!list_empty(&bo->lru))
-               return;
-
-       if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+       if (!list_empty(&bo->lru) || bo->pin_count)
                return;
 
        man = ttm_manager_type(bdev, mem->mem_type);
@@ -165,7 +162,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
        ttm_bo_del_from_lru(bo);
        ttm_bo_add_mem_to_lru(bo, &bo->mem);
 
-       if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+       if (bulk && !bo->pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
@@ -255,49 +252,17 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                if (ret)
                        goto out_err;
 
-               ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
-               if (ret)
-                       goto out_err;
-
                if (mem->mem_type != TTM_PL_SYSTEM) {
-                       ret = ttm_tt_populate(bdev, bo->ttm, ctx);
+                       ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
-
-                       ret = ttm_bo_tt_bind(bo, mem);
-                       if (ret)
-                               goto out_err;
-               }
-
-               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-                       if (bdev->driver->move_notify)
-                               bdev->driver->move_notify(bo, evict, mem);
-                       bo->mem = *mem;
-                       goto moved;
                }
        }
 
-       if (bdev->driver->move_notify)
-               bdev->driver->move_notify(bo, evict, mem);
-
-       if (old_man->use_tt && new_man->use_tt)
-               ret = ttm_bo_move_ttm(bo, ctx, mem);
-       else if (bdev->driver->move)
-               ret = bdev->driver->move(bo, evict, ctx, mem);
-       else
-               ret = ttm_bo_move_memcpy(bo, ctx, mem);
-
-       if (ret) {
-               if (bdev->driver->move_notify) {
-                       swap(*mem, bo->mem);
-                       bdev->driver->move_notify(bo, false, mem);
-                       swap(*mem, bo->mem);
-               }
-
+       ret = bdev->driver->move(bo, evict, ctx, mem);
+       if (ret)
                goto out_err;
-       }
 
-moved:
        ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
        return 0;
 
@@ -319,8 +284,8 @@ out_err:
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
-       if (bo->bdev->driver->move_notify)
-               bo->bdev->driver->move_notify(bo, false, NULL);
+       if (bo->bdev->driver->delete_mem_notify)
+               bo->bdev->driver->delete_mem_notify(bo);
 
        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->mem);
@@ -540,12 +505,12 @@ static void ttm_bo_release(struct kref *kref)
                spin_lock(&ttm_bo_glob.lru_lock);
 
                /*
-                * Make NO_EVICT bos immediately available to
+                * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 */
-               if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-                       bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+               if (bo->pin_count) {
+                       bo->pin_count = 0;
                        ttm_bo_del_from_lru(bo);
                        ttm_bo_add_mem_to_lru(bo, &bo->mem);
                }
@@ -860,35 +825,11 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
-static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
-                                     uint32_t cur_placement,
-                                     uint32_t proposed_placement)
-{
-       uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
-       uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
-
-       /**
-        * Keep current caching if possible.
-        */
-
-       if ((cur_placement & caching) != 0)
-               result |= (cur_placement & caching);
-       else if ((TTM_PL_FLAG_CACHED & caching) != 0)
-               result |= TTM_PL_FLAG_CACHED;
-       else if ((TTM_PL_FLAG_WC & caching) != 0)
-               result |= TTM_PL_FLAG_WC;
-       else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
-               result |= TTM_PL_FLAG_UNCACHED;
-
-       return result;
-}
-
 /**
  * ttm_bo_mem_placement - check if placement is compatible
  * @bo: BO to find memory for
  * @place: where to search
  * @mem: the memory object to fill in
- * @ctx: operation context
  *
  * Check if placement is compatible and fill in mem structure.
  * Returns -EBUSY if placement won't work or negative error code.
@@ -896,23 +837,17 @@ static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
  */
 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
                                const struct ttm_place *place,
-                               struct ttm_resource *mem,
-                               struct ttm_operation_ctx *ctx)
+                               struct ttm_resource *mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
-       uint32_t cur_flags = 0;
 
        man = ttm_manager_type(bdev, place->mem_type);
        if (!man || !ttm_resource_manager_used(man))
                return -EBUSY;
 
-       cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-                                         place->flags);
-       cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
-
        mem->mem_type = place->mem_type;
-       mem->placement = cur_flags;
+       mem->placement = place->flags;
 
        spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_del_from_lru(bo);
@@ -947,7 +882,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;
 
-               ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+               ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;
 
@@ -973,7 +908,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
 
-               ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+               ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;
 
@@ -1045,8 +980,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
                        continue;
 
                *new_flags = heap->flags;
-               if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
-                   (mem->mem_type == heap->mem_type) &&
+               if ((mem->mem_type == heap->mem_type) &&
                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
@@ -1100,9 +1034,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
-       } else {
-               bo->mem.placement &= TTM_PL_MASK_CACHING;
-               bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
        }
        /*
         * We might need to add a TTM.
@@ -1170,8 +1101,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
        bo->mem.bus.offset = 0;
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
-       bo->mem.placement = TTM_PL_FLAG_CACHED;
+       bo->mem.placement = 0;
        bo->acc_size = acc_size;
+       bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
@@ -1251,19 +1183,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
-                             unsigned long bo_size,
-                             unsigned struct_size)
-{
-       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
-       size_t size = 0;
-
-       size += ttm_round_pot(struct_size);
-       size += ttm_round_pot(npages * sizeof(void *));
-       size += ttm_round_pot(sizeof(struct ttm_tt));
-       return size;
-}
-
 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
@@ -1273,56 +1192,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-       size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+       size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
 }
 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
-int ttm_bo_create(struct ttm_bo_device *bdev,
-                       unsigned long size,
-                       enum ttm_bo_type type,
-                       struct ttm_placement *placement,
-                       uint32_t page_alignment,
-                       bool interruptible,
-                       struct ttm_buffer_object **p_bo)
-{
-       struct ttm_buffer_object *bo;
-       size_t acc_size;
-       int ret;
-
-       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-       if (unlikely(bo == NULL))
-               return -ENOMEM;
-
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-       ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-                         interruptible, acc_size,
-                         NULL, NULL, NULL);
-       if (likely(ret == 0))
-               *p_bo = bo;
-
-       return ret;
-}
-EXPORT_SYMBOL(ttm_bo_create);
-
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
-       struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
-
-       if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
-               pr_err("Illegal memory manager memory type %u\n", mem_type);
-               return -EINVAL;
-       }
-
-       if (!man) {
-               pr_err("Memory type %u has not been initialized\n", mem_type);
-               return 0;
-       }
-
-       return ttm_resource_manager_force_list_clean(bdev, man);
-}
-EXPORT_SYMBOL(ttm_bo_evict_mm);
-
 static void ttm_bo_global_kobj_release(struct kobject *kobj)
 {
        struct ttm_bo_global *glob =
@@ -1409,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                        pr_debug("Swap list %d was clean\n", i);
        spin_unlock(&glob->lru_lock);
 
+       ttm_pool_fini(&bdev->pool);
+
        if (!ret)
                ttm_bo_global_release();
 
@@ -1433,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
+                      struct device *dev,
                       struct address_space *mapping,
                       struct drm_vma_offset_manager *vma_manager,
-                      bool need_dma32)
+                      bool use_dma_alloc, bool use_dma32)
 {
        struct ttm_bo_global *glob = &ttm_bo_glob;
        int ret;
@@ -1450,12 +1327,12 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        bdev->driver = driver;
 
        ttm_bo_init_sysman(bdev);
+       ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
 
        bdev->vma_manager = vma_manager;
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
-       bdev->need_dma32 = need_dma32;
        mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&ttm_global_mutex);
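
[Editor's note] ttm_bo_device_init() grows a struct device pointer and two pool flags so that every TTM device now owns a page pool, created here and torn down in ttm_bo_device_release(); the old need_dma32 field goes away. A call-site sketch for a hypothetical driver, with argument roles mirroring the radeon hunk at the top of this diff (use_dma_alloc when swiotlb may be needed, use_dma32 when the DMA mask is limited):

	static int foo_ttm_init(struct foo_device *fdev)
	{
		return ttm_bo_device_init(&fdev->bdev, &foo_bo_driver,
					  fdev->dev,
					  fdev->ddev->anon_inode->i_mapping,
					  fdev->ddev->vma_offset_manager,
					  fdev->need_swiotlb,	/* use_dma_alloc */
					  dma_addressing_limited(&fdev->pdev->dev));
	}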
@@ -1506,8 +1383,9 @@ EXPORT_SYMBOL(ttm_bo_wait);
  * A buffer object shrink method that tries to swap out the first
  * buffer object on the bo_global::swap_lru list.
  */
-int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
 {
+       struct ttm_bo_global *glob = &ttm_bo_glob;
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        bool locked;
@@ -1551,14 +1429,13 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
         * Move to system cached
         */
 
-       if (bo->mem.mem_type != TTM_PL_SYSTEM ||
-           bo->ttm->caching_state != tt_cached) {
+       if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource evict_mem;
 
                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
-               evict_mem.placement = TTM_PL_FLAG_CACHED;
+               evict_mem.placement = 0;
                evict_mem.mem_type = TTM_PL_SYSTEM;
 
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
@@ -1584,7 +1461,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);
 
-       ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
+       ret = ttm_tt_swapout(bo->bdev, bo->ttm);
 out:
 
        /**
@@ -1599,17 +1476,6 @@ out:
 }
 EXPORT_SYMBOL(ttm_bo_swapout);
 
-void ttm_bo_swapout_all(void)
-{
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
-
-       while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_swapout_all);
-
 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
 {
        if (bo->ttm == NULL)
@@ -1619,12 +1485,3 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
        bo->ttm = NULL;
 }
 
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
-{
-       return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
-}
-
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
-{
-       bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
-}
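
[Editor's note] With ttm_bo_move_ttm() and the core memcpy fallback removed from ttm_bo_handle_move_mem(), the driver's move() callback is the single move path (see the unconditional bdev->driver->move() call earlier in this file), and bind/unbind become driver-internal details rather than core hooks. A minimal sketch of a move() for a hypothetical driver with no copy engine, simply delegating to the still-exported memcpy helper:

	static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
			       struct ttm_operation_ctx *ctx,
			       struct ttm_resource *new_mem)
	{
		return ttm_bo_move_memcpy(bo, ctx, new_mem);
	}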
index fb2a25f8408fc8c1fb8da9b7829c100400c262a6..ecb54415d1ca8d5eb33317058f4cef16f51f7fd4 100644 (file)
@@ -45,53 +45,6 @@ struct ttm_transfer_obj {
        struct ttm_buffer_object *bo;
 };
 
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
-       ttm_resource_free(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                  struct ttm_operation_ctx *ctx,
-                   struct ttm_resource *new_mem)
-{
-       struct ttm_tt *ttm = bo->ttm;
-       struct ttm_resource *old_mem = &bo->mem;
-       int ret;
-
-       if (old_mem->mem_type != TTM_PL_SYSTEM) {
-               ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-
-               if (unlikely(ret != 0)) {
-                       if (ret != -ERESTARTSYS)
-                               pr_err("Failed to expire sync object before unbinding TTM\n");
-                       return ret;
-               }
-
-               ttm_bo_tt_unbind(bo);
-               ttm_bo_free_old_node(bo);
-               old_mem->mem_type = TTM_PL_SYSTEM;
-       }
-
-       ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
-       if (unlikely(ret != 0))
-               return ret;
-
-       if (new_mem->mem_type != TTM_PL_SYSTEM) {
-
-               ret = ttm_tt_populate(bo->bdev, ttm, ctx);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ret = ttm_bo_tt_bind(bo, new_mem);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-
-       ttm_bo_assign_mem(bo, new_mem);
-       return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem)
 {
@@ -135,7 +88,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 
-               if (mem->placement & TTM_PL_FLAG_WC)
+               if (mem->bus.caching == ttm_write_combined)
                        addr = ioremap_wc(mem->bus.offset, bus_size);
                else
                        addr = ioremap(mem->bus.offset, bus_size);
@@ -227,11 +180,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        void *new_iomap;
        int ret;
        unsigned long i;
-       unsigned long page;
-       unsigned long add = 0;
-       int dir;
 
-       ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+       ret = ttm_bo_wait_ctx(bo, ctx);
        if (ret)
                return ret;
 
@@ -267,29 +217,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                        goto out1;
        }
 
-       add = 0;
-       dir = 1;
-
-       if ((old_mem->mem_type == new_mem->mem_type) &&
-           (new_mem->start < old_mem->start + old_mem->size)) {
-               dir = -1;
-               add = new_mem->num_pages - 1;
-       }
-
        for (i = 0; i < new_mem->num_pages; ++i) {
-               page = i * dir + add;
                if (old_iomap == NULL) {
-                       pgprot_t prot = ttm_io_prot(old_mem->placement,
-                                                   PAGE_KERNEL);
-                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+                       pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
+                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
                                                   prot);
                } else if (new_iomap == NULL) {
-                       pgprot_t prot = ttm_io_prot(new_mem->placement,
-                                                   PAGE_KERNEL);
-                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+                       pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
+                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
                                                   prot);
                } else {
-                       ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+                       ret = ttm_copy_io_page(new_iomap, old_iomap, i);
                }
                if (ret)
                        goto out1;
@@ -352,7 +290,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                return -ENOMEM;
 
        fbo->base = *bo;
-       fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
 
        ttm_bo_get(bo);
        fbo->bo = bo;
@@ -372,6 +309,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
+       fbo->base.pin_count = 1;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;
 
@@ -384,21 +322,28 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        return 0;
 }
 
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+                    pgprot_t tmp)
 {
+       struct ttm_resource_manager *man;
+       enum ttm_caching caching;
+
+       man = ttm_manager_type(bo->bdev, res->mem_type);
+       caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+
        /* Cached mappings need no adjustment */
-       if (caching_flags & TTM_PL_FLAG_CACHED)
+       if (caching == ttm_cached)
                return tmp;
 
 #if defined(__i386__) || defined(__x86_64__)
-       if (caching_flags & TTM_PL_FLAG_WC)
+       if (caching == ttm_write_combined)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
     defined(__powerpc__) || defined(__mips__)
-       if (caching_flags & TTM_PL_FLAG_WC)
+       if (caching == ttm_write_combined)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
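
[Editor's note] ttm_io_prot() now takes the BO and the resource and derives the caching mode from the ttm_caching enum (the TT's caching for system-backed resources, bus.caching for io ones) instead of decoding placement flags. A sketch of a caller, in the spirit of the kmap path later in this file (the wrapper function is hypothetical):

	static void *foo_map_tt(struct ttm_buffer_object *bo)
	{
		pgprot_t prot = ttm_io_prot(bo, &bo->mem, PAGE_KERNEL);

		/* Map the whole TT with the BO's caching attributes. */
		return vmap(bo->ttm->pages, bo->num_pages, 0, prot);
	}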
@@ -422,7 +367,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
-               if (mem->placement & TTM_PL_FLAG_WC)
+               if (mem->bus.caching == ttm_write_combined)
                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                  size);
                else
@@ -452,7 +397,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
        if (ret)
                return ret;
 
-       if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+       if (num_pages == 1 && ttm->caching == ttm_cached) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
@@ -466,7 +411,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
-               prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+               prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
@@ -536,7 +481,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
 
        if (!dst_use_tt)
                ttm_bo_tt_destroy(bo);
-       ttm_bo_free_old_node(bo);
+       ttm_resource_free(bo, &bo->mem);
        return 0;
 }
 
@@ -597,7 +542,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
        }
        spin_unlock(&from->move_lock);
 
-       ttm_bo_free_old_node(bo);
+       ttm_resource_free(bo, &bo->mem);
 
        dma_fence_put(bo->moving);
        bo->moving = dma_fence_get(fence);
index 98a006fc30a58db3b1e097ca309af1d8556a6765..eeaca5d1efe3943bc37cee351ff3349eae95d9c8 100644 (file)
@@ -157,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
                        return VM_FAULT_NOPAGE;
        }
 
+       /*
+        * Refuse to fault imported pages. This should be handled
+        * (if at all) by redirecting mmap to the exporter.
+        */
+       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+               dma_resv_unlock(bo->base.resv);
+               return VM_FAULT_SIGBUS;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);
@@ -281,35 +290,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
 
-       /*
-        * Refuse to fault imported pages. This should be handled
-        * (if at all) by redirecting mmap to the exporter.
-        */
-       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-               return VM_FAULT_SIGBUS;
-
-       if (bdev->driver->fault_reserve_notify) {
-               struct dma_fence *moving = dma_fence_get(bo->moving);
-
-               err = bdev->driver->fault_reserve_notify(bo);
-               switch (err) {
-               case 0:
-                       break;
-               case -EBUSY:
-               case -ERESTARTSYS:
-                       dma_fence_put(moving);
-                       return VM_FAULT_NOPAGE;
-               default:
-                       dma_fence_put(moving);
-                       return VM_FAULT_SIGBUS;
-               }
-
-               if (bo->moving != moving) {
-                       ttm_bo_move_to_lru_tail_unlocked(bo);
-               }
-               dma_fence_put(moving);
-       }
-
        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
@@ -330,7 +310,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
        if (unlikely(page_offset >= bo->num_pages))
                return VM_FAULT_SIGBUS;
 
-       prot = ttm_io_prot(bo->mem.placement, prot);
+       prot = ttm_io_prot(bo, &bo->mem, prot);
        if (!bo->mem.bus.is_iomem) {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
index 89d50f38c0f2c770825daa340e2ea2b0eef26c7f..f9a90bfaa3c1a8037590050f40d7c208a2f91716 100644 (file)
@@ -30,7 +30,6 @@
 
 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -38,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <drm/ttm/ttm_pool.h>
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 
        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                spin_unlock(&glob->lock);
-               ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
+               ret = ttm_bo_swapout(ctx);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        break;
@@ -451,8 +451,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
                pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
-       ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
-       ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+       ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
 out_no_zone:
        ttm_mem_global_release(glob);
@@ -465,8 +464,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
        unsigned int i;
 
        /* let the page allocator first stop the shrink work. */
-       ttm_page_alloc_fini();
-       ttm_dma_page_alloc_fini();
+       ttm_pool_mgr_fini();
 
        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
deleted file mode 100644 (file)
index 14660f7..0000000
+++ /dev/null
@@ -1,1189 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- *          Jerome Glisse <jglisse@redhat.com>
- *          Pauli Nieminen <suokkos@gmail.com>
- */
-
-/* simple list based uncached page pool
- * - Pool collects recently freed pages for reuse
- * - Use page->lru to keep a free list
- * - doesn't track currently in use pages
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/atomic.h>
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
-
-#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION               16
-#define FREE_ALL_PAGES                 (~0U)
-/* times are in msecs */
-#define PAGE_FREE_INTERVAL             1000
-
-/**
- * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
- *
- * @lock: Protects the shared pool from concurrent access. Must be used with
- * irqsave/irqrestore variants because the pool allocator may be called from
- * delayed work.
- * @fill_lock: Prevent concurrent calls to fill.
- * @list: Pool of free uc/wc pages for fast reuse.
- * @gfp_flags: Flags to pass for alloc_page.
- * @npages: Number of pages in pool.
- */
-struct ttm_page_pool {
-       spinlock_t              lock;
-       bool                    fill_lock;
-       struct list_head        list;
-       gfp_t                   gfp_flags;
-       unsigned                npages;
-       char                    *name;
-       unsigned long           nfrees;
-       unsigned long           nrefills;
-       unsigned int            order;
-};
-
-/**
- * Limits for the pool. They are handled without locks because the only place
- * where they may change is the sysfs store. They won't have an immediate
- * effect anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
-       unsigned        alloc_size;
-       unsigned        max_size;
-       unsigned        small;
-};
-
-#define NUM_POOLS 6
-
-/**
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * Manager is read only object for pool code so it doesn't need locking.
- *
- * @free_interval: minimum number of jiffies between freeing pages from pool.
- * @page_alloc_inited: reference counting for pool allocation.
- * @work: Work that is used to shrink the pool. Work is only run when there
- * are some pages to free.
- * @small_allocation: Limit, in number of pages, below which an allocation
- * counts as small.
- *
- * @pools: All pool objects in use.
- **/
-struct ttm_pool_manager {
-       struct kobject          kobj;
-       struct shrinker         mm_shrink;
-       struct ttm_pool_opts    options;
-
-       union {
-               struct ttm_page_pool    pools[NUM_POOLS];
-               struct {
-                       struct ttm_page_pool    wc_pool;
-                       struct ttm_page_pool    uc_pool;
-                       struct ttm_page_pool    wc_pool_dma32;
-                       struct ttm_page_pool    uc_pool_dma32;
-                       struct ttm_page_pool    wc_pool_huge;
-                       struct ttm_page_pool    uc_pool_huge;
-               } ;
-       };
-};
-
-static struct attribute ttm_page_pool_max = {
-       .name = "pool_max_size",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
-       .name = "pool_small_allocation",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
-       .name = "pool_allocation_size",
-       .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
-       &ttm_page_pool_max,
-       &ttm_page_pool_small,
-       &ttm_page_pool_alloc_size,
-       NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj,
-               struct attribute *attr, const char *buffer, size_t size)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       int chars;
-       unsigned val;
-       chars = sscanf(buffer, "%u", &val);
-       if (chars == 0)
-               return size;
-
-       /* Convert kb to number of pages */
-       val = val / (PAGE_SIZE >> 10);
-
-       if (attr == &ttm_page_pool_max)
-               m->options.max_size = val;
-       else if (attr == &ttm_page_pool_small)
-               m->options.small = val;
-       else if (attr == &ttm_page_pool_alloc_size) {
-               if (val > NUM_PAGES_TO_ALLOC*8) {
-                       pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
-                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-                       return size;
-               } else if (val > NUM_PAGES_TO_ALLOC) {
-                       pr_warn("Setting allocation size to larger than %lu is not recommended\n",
-                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-               }
-               m->options.alloc_size = val;
-       }
-
-       return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj,
-               struct attribute *attr, char *buffer)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       unsigned val = 0;
-
-       if (attr == &ttm_page_pool_max)
-               val = m->options.max_size;
-       else if (attr == &ttm_page_pool_small)
-               val = m->options.small;
-       else if (attr == &ttm_page_pool_alloc_size)
-               val = m->options.alloc_size;
-
-       val = val * (PAGE_SIZE >> 10);
-
-       return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
-       .show = &ttm_pool_show,
-       .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
-       .release = &ttm_pool_kobj_release,
-       .sysfs_ops = &ttm_pool_sysfs_ops,
-       .default_attrs = ttm_pool_attrs,
-};
-
-static struct ttm_pool_manager *_manager;
-
-/**
- * Select the right pool for the requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
-                                         enum ttm_caching_state cstate)
-{
-       int pool_index;
-
-       if (cstate == tt_cached)
-               return NULL;
-
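-       /*
-        * The index selects an entry of the pools[] union: bit 0 picks the
-        * uncached over the write-combined pool, bit 1 the dma32 variant and
-        * bit 2 the huge variant.
-        */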
-       if (cstate == tt_wc)
-               pool_index = 0x0;
-       else
-               pool_index = 0x1;
-
-       if (flags & TTM_PAGE_FLAG_DMA32) {
-               if (huge)
-                       return NULL;
-               pool_index |= 0x2;
-
-       } else if (huge) {
-               pool_index |= 0x4;
-       }
-
-       return &_manager->pools[pool_index];
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages,
-               unsigned int order)
-{
-       unsigned int i, pages_nr = (1 << order);
-
-       if (order == 0) {
-               if (ttm_set_pages_array_wb(pages, npages))
-                       pr_err("Failed to set %d pages to wb!\n", npages);
-       }
-
-       for (i = 0; i < npages; ++i) {
-               if (order > 0) {
-                       if (ttm_set_pages_wb(pages[i], pages_nr))
-                               pr_err("Failed to set %d pages to wb!\n", pages_nr);
-               }
-               __free_pages(pages[i], order);
-       }
-}
-
-static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
-               unsigned freed_pages)
-{
-       pool->npages -= freed_pages;
-       pool->nfrees += freed_pages;
-}
-
-/**
- * Free pages from the pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * pages in one go.
- *
- * @pool: the pool to free pages from
- * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
- * @use_static: safe to use the static buffer
- **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
-                             bool use_static)
-{
-       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
-       unsigned long irq_flags;
-       struct page *p;
-       struct page **pages_to_free;
-       unsigned freed_pages = 0,
-                npages_to_free = nr_free;
-
-       if (NUM_PAGES_TO_ALLOC < nr_free)
-               npages_to_free = NUM_PAGES_TO_ALLOC;
-
-       if (use_static)
-               pages_to_free = static_buf;
-       else
-               pages_to_free = kmalloc_array(npages_to_free,
-                                             sizeof(struct page *),
-                                             GFP_KERNEL);
-       if (!pages_to_free) {
-               pr_debug("Failed to allocate memory for pool free operation\n");
-               return 0;
-       }
-
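-       /*
-        * Pages are taken in batches of at most NUM_PAGES_TO_ALLOC from the
-        * cold end of the LRU list; the lock is dropped while the caching
-        * state is reverted and the batch is freed.
-        */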
-restart:
-       spin_lock_irqsave(&pool->lock, irq_flags);
-
-       list_for_each_entry_reverse(p, &pool->list, lru) {
-               if (freed_pages >= npages_to_free)
-                       break;
-
-               pages_to_free[freed_pages++] = p;
-               /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
-               if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-                       /* remove range of pages from the pool */
-                       __list_del(p->lru.prev, &pool->list);
-
-                       ttm_pool_update_free_locked(pool, freed_pages);
-                       /**
-                        * Because changing page caching is costly
-                        * we unlock the pool to prevent stalling.
-                        */
-                       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-                       ttm_pages_put(pages_to_free, freed_pages, pool->order);
-                       if (likely(nr_free != FREE_ALL_PAGES))
-                               nr_free -= freed_pages;
-
-                       if (NUM_PAGES_TO_ALLOC >= nr_free)
-                               npages_to_free = nr_free;
-                       else
-                               npages_to_free = NUM_PAGES_TO_ALLOC;
-
-                       freed_pages = 0;
-
-                       /* if pages remain to be freed, restart the processing */
-                       if (nr_free)
-                               goto restart;
-
-                       /* Not allowed to fall through or break because
-                        * following context is inside spinlock while we are
-                        * outside here.
-                        */
-                       goto out;
-
-               }
-       }
-
-       /* remove range of pages from the pool */
-       if (freed_pages) {
-               __list_del(&p->lru, &pool->list);
-
-               ttm_pool_update_free_locked(pool, freed_pages);
-               nr_free -= freed_pages;
-       }
-
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-       if (freed_pages)
-               ttm_pages_put(pages_to_free, freed_pages, pool->order);
-out:
-       if (pages_to_free != static_buf)
-               kfree(pages_to_free);
-       return nr_free;
-}
-
-/**
- * Callback for the mm to request that the pools reduce the number of pages held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * This code is crying out for a shrinker per pool....
- */
-static unsigned long
-ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-       static DEFINE_MUTEX(lock);
-       static unsigned start_pool;
-       unsigned i;
-       unsigned pool_offset;
-       struct ttm_page_pool *pool;
-       int shrink_pages = sc->nr_to_scan;
-       unsigned long freed = 0;
-       unsigned int nr_free_pool;
-
-       if (!mutex_trylock(&lock))
-               return SHRINK_STOP;
-       /* select the start pool in round-robin fashion */
-       pool_offset = ++start_pool % NUM_POOLS;
-       for (i = 0; i < NUM_POOLS; ++i) {
-               unsigned nr_free = shrink_pages;
-               unsigned page_nr;
-
-               if (shrink_pages == 0)
-                       break;
-
-               pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-               page_nr = (1 << pool->order);
-               /* OK to use static buffer since global mutex is held. */
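-               /* Convert the page budget into pool entries of page_nr pages each. */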
-               nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
-               shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-               freed += (nr_free_pool - shrink_pages) << pool->order;
-               if (freed >= sc->nr_to_scan)
-                       break;
-               shrink_pages <<= pool->order;
-       }
-       mutex_unlock(&lock);
-       return freed;
-}
-
-
-static unsigned long
-ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
-       unsigned i;
-       unsigned long count = 0;
-       struct ttm_page_pool *pool;
-
-       for (i = 0; i < NUM_POOLS; ++i) {
-               pool = &_manager->pools[i];
-               count += (pool->npages << pool->order);
-       }
-
-       return count;
-}
-
-static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
-       manager->mm_shrink.count_objects = ttm_pool_shrink_count;
-       manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
-       manager->mm_shrink.seeks = 1;
-       return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
-       unregister_shrinker(&manager->mm_shrink);
-}
-
-static int ttm_set_pages_caching(struct page **pages,
-               enum ttm_caching_state cstate, unsigned cpages)
-{
-       int r = 0;
-       /* Set page caching */
-       switch (cstate) {
-       case tt_uncached:
-               r = ttm_set_pages_array_uc(pages, cpages);
-               if (r)
-                       pr_err("Failed to set %d pages to uc!\n", cpages);
-               break;
-       case tt_wc:
-               r = ttm_set_pages_array_wc(pages, cpages);
-               if (r)
-                       pr_err("Failed to set %d pages to wc!\n", cpages);
-               break;
-       default:
-               break;
-       }
-       return r;
-}
-
-/**
- * Free the pages that failed to change their caching state. If there are
- * any pages that have already changed their caching state, put them back
- * in the pool.
- */
-static void ttm_handle_caching_state_failure(struct list_head *pages,
-               int ttm_flags, enum ttm_caching_state cstate,
-               struct page **failed_pages, unsigned cpages)
-{
-       unsigned i;
-       /* Failed pages have to be freed */
-       for (i = 0; i < cpages; ++i) {
-               list_del(&failed_pages[i]->lru);
-               __free_page(failed_pages[i]);
-       }
-}
-
-/**
- * Allocate new pages with the correct caching.
- *
- * This function is reentrant if the caller updates count based on the
- * number of pages returned in the pages array.
- */
-static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-                              int ttm_flags, enum ttm_caching_state cstate,
-                              unsigned count, unsigned order)
-{
-       struct page **caching_array;
-       struct page *p;
-       int r = 0;
-       unsigned i, j, cpages;
-       unsigned npages = 1 << order;
-       unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
-
-       /* allocate array for page caching change */
-       caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
-                                     GFP_KERNEL);
-
-       if (!caching_array) {
-               pr_debug("Unable to allocate table for new pages\n");
-               return -ENOMEM;
-       }
-
-       for (i = 0, cpages = 0; i < count; ++i) {
-               p = alloc_pages(gfp_flags, order);
-
-               if (!p) {
-                       pr_debug("Unable to get page %u\n", i);
-
-                       /* store already allocated pages in the pool after
-                        * setting the caching state */
-                       if (cpages) {
-                               r = ttm_set_pages_caching(caching_array,
-                                                         cstate, cpages);
-                               if (r)
-                                       ttm_handle_caching_state_failure(pages,
-                                               ttm_flags, cstate,
-                                               caching_array, cpages);
-                       }
-                       r = -ENOMEM;
-                       goto out;
-               }
-
-               list_add(&p->lru, pages);
-
-#ifdef CONFIG_HIGHMEM
-               /* The gfp flags of a highmem page should never include dma32,
-                * so we should be fine in that case.
-                */
-               if (PageHighMem(p))
-                       continue;
-
-#endif
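-               /* Queue every 4K page of this allocation for the caching
-                * change; flush in batches of max_cpages.
-                */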
-               for (j = 0; j < npages; ++j) {
-                       caching_array[cpages++] = p++;
-                       if (cpages == max_cpages) {
-
-                               r = ttm_set_pages_caching(caching_array,
-                                               cstate, cpages);
-                               if (r) {
-                                       ttm_handle_caching_state_failure(pages,
-                                               ttm_flags, cstate,
-                                               caching_array, cpages);
-                                       goto out;
-                               }
-                               cpages = 0;
-                       }
-               }
-       }
-
-       if (cpages) {
-               r = ttm_set_pages_caching(caching_array, cstate, cpages);
-               if (r)
-                       ttm_handle_caching_state_failure(pages,
-                                       ttm_flags, cstate,
-                                       caching_array, cpages);
-       }
-out:
-       kfree(caching_array);
-
-       return r;
-}
-
-/**
- * Fill the given pool if there aren't enough pages and the requested number of
- * pages is small.
- */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
-                                     enum ttm_caching_state cstate,
-                                     unsigned count, unsigned long *irq_flags)
-{
-       struct page *p;
-       int r;
-       unsigned cpages = 0;
-       /**
-        * Only allow one pool fill operation at a time.
-        * If the pool doesn't have enough pages for the allocation, new
-        * pages are allocated outside of the pool.
-        */
-       if (pool->fill_lock)
-               return;
-
-       pool->fill_lock = true;
-
-       /* If the allocation request is small and there are not enough
-        * pages in the pool, fill the pool up first. */
-       if (count < _manager->options.small
-               && count > pool->npages) {
-               struct list_head new_pages;
-               unsigned alloc_size = _manager->options.alloc_size;
-
-               /**
-                * Can't change page caching if in irqsave context. We have to
-                * drop the pool->lock.
-                */
-               spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
-               INIT_LIST_HEAD(&new_pages);
-               r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-                                       cstate, alloc_size, 0);
-               spin_lock_irqsave(&pool->lock, *irq_flags);
-
-               if (!r) {
-                       list_splice(&new_pages, &pool->list);
-                       ++pool->nrefills;
-                       pool->npages += alloc_size;
-               } else {
-                       pr_debug("Failed to fill pool (%p)\n", pool);
-                       /* If we have any pages left, put them in the pool. */
-                       list_for_each_entry(p, &new_pages, lru) {
-                               ++cpages;
-                       }
-                       list_splice(&new_pages, &pool->list);
-                       pool->npages += cpages;
-               }
-
-       }
-       pool->fill_lock = false;
-}
-
-/**
- * Allocate pages from the pool and put them on the return list.
- *
- * @return zero for success or negative error code.
- */
-static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-                                  struct list_head *pages,
-                                  int ttm_flags,
-                                  enum ttm_caching_state cstate,
-                                  unsigned count, unsigned order)
-{
-       unsigned long irq_flags;
-       struct list_head *p;
-       unsigned i;
-       int r = 0;
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
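-       /* Only the 4K pools are refilled here; huge pools are not topped up. */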
-       if (!order)
-               ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
-                                         &irq_flags);
-
-       if (count >= pool->npages) {
-               /* take all pages from the pool */
-               list_splice_init(&pool->list, pages);
-               count -= pool->npages;
-               pool->npages = 0;
-               goto out;
-       }
-       /* Find the cut point for the requested number of pages. Walk the
-        * list from whichever end is closer to halve the search space. */
-       if (count <= pool->npages/2) {
-               i = 0;
-               list_for_each(p, &pool->list) {
-                       if (++i == count)
-                               break;
-               }
-       } else {
-               i = pool->npages + 1;
-               list_for_each_prev(p, &pool->list) {
-                       if (--i == count)
-                               break;
-               }
-       }
-       /* Cut 'count' number of pages from the pool */
-       list_cut_position(pages, &pool->list, p);
-       pool->npages -= count;
-       count = 0;
-out:
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-       /* clear the pages coming from the pool if requested */
-       if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-               struct page *page;
-
-               list_for_each_entry(page, pages, lru) {
-                       if (PageHighMem(page))
-                               clear_highpage(page);
-                       else
-                               clear_page(page_address(page));
-               }
-       }
-
-       /* If the pool didn't have enough pages, allocate new ones. */
-       if (count) {
-               gfp_t gfp_flags = pool->gfp_flags;
-
-               /* set zero flag for page allocation if required */
-               if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-                       gfp_flags |= __GFP_ZERO;
-
-               if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
-                       gfp_flags |= __GFP_RETRY_MAYFAIL;
-
-               /* ttm_alloc_new_pages doesn't reference the pool, so we can
-                * run multiple requests in parallel.
-                */
-               r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
-                                       count, order);
-       }
-
-       return r;
-}
-
-/* Put all pages in the pages array into the correct pool to wait for reuse. */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-                         enum ttm_caching_state cstate)
-{
-       struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
-       unsigned long irq_flags;
-       unsigned i;
-
-       if (pool == NULL) {
-               /* No pool for this memory type so free the pages */
-               i = 0;
-               while (i < npages) {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       struct page *p = pages[i];
-#endif
-                       unsigned order = 0, j;
-
-                       if (!pages[i]) {
-                               ++i;
-                               continue;
-                       }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       if (!(flags & TTM_PAGE_FLAG_DMA32) &&
-                           (npages - i) >= HPAGE_PMD_NR) {
-                               for (j = 1; j < HPAGE_PMD_NR; ++j)
-                                       if (++p != pages[i + j])
-                                           break;
-
-                               if (j == HPAGE_PMD_NR)
-                                       order = HPAGE_PMD_ORDER;
-                       }
-#endif
-
-                       if (page_count(pages[i]) != 1)
-                               pr_err("Erroneous page count. Leaking pages.\n");
-                       __free_pages(pages[i], order);
-
-                       j = 1 << order;
-                       while (j) {
-                               pages[i++] = NULL;
-                               --j;
-                       }
-               }
-               return;
-       }
-
-       i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (huge) {
-               unsigned max_size, n2free;
-
-               spin_lock_irqsave(&huge->lock, irq_flags);
-               while ((npages - i) >= HPAGE_PMD_NR) {
-                       struct page *p = pages[i];
-                       unsigned j;
-
-                       if (!p)
-                               break;
-
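-                       /* Only a full run of HPAGE_PMD_NR physically
-                        * contiguous pages goes back to the huge pool.
-                        */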
-                       for (j = 1; j < HPAGE_PMD_NR; ++j)
-                               if (++p != pages[i + j])
-                                   break;
-
-                       if (j != HPAGE_PMD_NR)
-                               break;
-
-                       list_add_tail(&pages[i]->lru, &huge->list);
-
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               pages[i++] = NULL;
-                       huge->npages++;
-               }
-
-               /* Check that we don't go over the pool limit */
-               max_size = _manager->options.max_size;
-               max_size /= HPAGE_PMD_NR;
-               if (huge->npages > max_size)
-                       n2free = huge->npages - max_size;
-               else
-                       n2free = 0;
-               spin_unlock_irqrestore(&huge->lock, irq_flags);
-               if (n2free)
-                       ttm_page_pool_free(huge, n2free, false);
-       }
-#endif
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       while (i < npages) {
-               if (pages[i]) {
-                       if (page_count(pages[i]) != 1)
-                               pr_err("Erroneous page count. Leaking pages.\n");
-                       list_add_tail(&pages[i]->lru, &pool->list);
-                       pages[i] = NULL;
-                       pool->npages++;
-               }
-               ++i;
-       }
-       /* Check that we don't go over the pool limit */
-       npages = 0;
-       if (pool->npages > _manager->options.max_size) {
-               npages = pool->npages - _manager->options.max_size;
-               /* free at least NUM_PAGES_TO_ALLOC number of pages
-                * to reduce calls to set_memory_wb */
-               if (npages < NUM_PAGES_TO_ALLOC)
-                       npages = NUM_PAGES_TO_ALLOC;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-       if (npages)
-               ttm_page_pool_free(pool, npages, false);
-}
-
-/*
- * On success the pages array will hold npages correctly
- * cached pages.
- */
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
-                        enum ttm_caching_state cstate)
-{
-       struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
-       struct list_head plist;
-       struct page *p = NULL;
-       unsigned count, first;
-       int r;
-
-       /* No pool for cached pages */
-       if (pool == NULL) {
-               gfp_t gfp_flags = GFP_USER;
-               unsigned i;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               unsigned j;
-#endif
-
-               /* set zero flag for page allocation if required */
-               if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-                       gfp_flags |= __GFP_ZERO;
-
-               if (flags & TTM_PAGE_FLAG_NO_RETRY)
-                       gfp_flags |= __GFP_RETRY_MAYFAIL;
-
-               if (flags & TTM_PAGE_FLAG_DMA32)
-                       gfp_flags |= GFP_DMA32;
-               else
-                       gfp_flags |= GFP_HIGHUSER;
-
-               i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               if (!(gfp_flags & GFP_DMA32)) {
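-                       /* Opportunistically allocate huge-page-sized chunks
-                        * first; fall back to single pages below.
-                        */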
-                       while (npages >= HPAGE_PMD_NR) {
-                               gfp_t huge_flags = gfp_flags;
-
-                               huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
-                                       __GFP_KSWAPD_RECLAIM;
-                               huge_flags &= ~__GFP_MOVABLE;
-                               huge_flags &= ~__GFP_COMP;
-                               p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
-                               if (!p)
-                                       break;
-
-                               for (j = 0; j < HPAGE_PMD_NR; ++j)
-                                       pages[i++] = p++;
-
-                               npages -= HPAGE_PMD_NR;
-                       }
-               }
-#endif
-
-               first = i;
-               while (npages) {
-                       p = alloc_page(gfp_flags);
-                       if (!p) {
-                               pr_debug("Unable to allocate page\n");
-                               return -ENOMEM;
-                       }
-
-                       /* Swap the pages if we detect consecutive order */
-                       if (i > first && pages[i - 1] == p - 1)
-                               swap(p, pages[i - 1]);
-
-                       pages[i++] = p;
-                       --npages;
-               }
-               return 0;
-       }
-
-       count = 0;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (huge && npages >= HPAGE_PMD_NR) {
-               INIT_LIST_HEAD(&plist);
-               ttm_page_pool_get_pages(huge, &plist, flags, cstate,
-                                       npages / HPAGE_PMD_NR,
-                                       HPAGE_PMD_ORDER);
-
-               list_for_each_entry(p, &plist, lru) {
-                       unsigned j;
-
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               pages[count++] = &p[j];
-               }
-       }
-#endif
-
-       INIT_LIST_HEAD(&plist);
-       r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
-                                   npages - count, 0);
-
-       first = count;
-       list_for_each_entry(p, &plist, lru) {
-               struct page *tmp = p;
-
-               /* Swap the pages if we detect consecutive order */
-               if (count > first && pages[count - 1] == tmp - 1)
-                       swap(tmp, pages[count - 1]);
-               pages[count++] = tmp;
-       }
-
-       if (r) {
-               /* If there are any pages in the list, put them back in
-                * the pool.
-                */
-               pr_debug("Failed to allocate extra pages for large request\n");
-               ttm_put_pages(pages, count, flags, cstate);
-               return r;
-       }
-
-       return 0;
-}
-
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-               char *name, unsigned int order)
-{
-       spin_lock_init(&pool->lock);
-       pool->fill_lock = false;
-       INIT_LIST_HEAD(&pool->list);
-       pool->npages = pool->nfrees = 0;
-       pool->gfp_flags = flags;
-       pool->name = name;
-       pool->order = order;
-}
-
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
-       int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       unsigned order = HPAGE_PMD_ORDER;
-#else
-       unsigned order = 0;
-#endif
-
-       WARN_ON(_manager);
-
-       pr_info("Initializing pool allocator\n");
-
-       _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
-       if (!_manager)
-               return -ENOMEM;
-
-       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
-
-       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
-
-       ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "wc dma", 0);
-
-       ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "uc dma", 0);
-
-       ttm_page_pool_init_locked(&_manager->wc_pool_huge,
-                                 (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
-                                  __GFP_KSWAPD_RECLAIM) &
-                                 ~(__GFP_MOVABLE | __GFP_COMP),
-                                 "wc huge", order);
-
-       ttm_page_pool_init_locked(&_manager->uc_pool_huge,
-                                 (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
-                                  __GFP_KSWAPD_RECLAIM) &
-                                 ~(__GFP_MOVABLE | __GFP_COMP),
-                                 "uc huge", order);
-
-       _manager->options.max_size = max_pages;
-       _manager->options.small = SMALL_ALLOCATION;
-       _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
-       ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
-                                  &glob->kobj, "pool");
-       if (unlikely(ret != 0))
-               goto error;
-
-       ret = ttm_pool_mm_shrink_init(_manager);
-       if (unlikely(ret != 0))
-               goto error;
-       return 0;
-
-error:
-       kobject_put(&_manager->kobj);
-       _manager = NULL;
-       return ret;
-}
-
-void ttm_page_alloc_fini(void)
-{
-       int i;
-
-       pr_info("Finalizing pool allocator\n");
-       ttm_pool_mm_shrink_fini(_manager);
-
-       /* OK to use static buffer since global mutex is no longer used. */
-       for (i = 0; i < NUM_POOLS; ++i)
-               ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
-
-       kobject_put(&_manager->kobj);
-       _manager = NULL;
-}
-
-static void
-ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
-{
-       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       unsigned i;
-
-       if (mem_count_update == 0)
-               goto put_pages;
-
-       for (i = 0; i < mem_count_update; ++i) {
-               if (!ttm->pages[i])
-                       continue;
-
-               ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
-       }
-
-put_pages:
-       ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
-                     ttm->caching_state);
-       ttm_tt_set_unpopulated(ttm);
-}
-
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
-       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       unsigned i;
-       int ret;
-
-       if (ttm_tt_is_populated(ttm))
-               return 0;
-
-       if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
-               return -ENOMEM;
-
-       ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
-                           ttm->caching_state);
-       if (unlikely(ret != 0)) {
-               ttm_pool_unpopulate_helper(ttm, 0);
-               return ret;
-       }
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               PAGE_SIZE, ctx);
-               if (unlikely(ret != 0)) {
-                       ttm_pool_unpopulate_helper(ttm, i);
-                       return -ENOMEM;
-               }
-       }
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0)) {
-                       ttm_pool_unpopulate(ttm);
-                       return ret;
-               }
-       }
-
-       ttm_tt_set_populated(ttm);
-       return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
-       ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
-                                       struct ttm_operation_ctx *ctx)
-{
-       unsigned i, j;
-       int r;
-
-       r = ttm_pool_populate(&tt->ttm, ctx);
-       if (r)
-               return r;
-
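-       /*
-        * Map physically contiguous runs of pages with a single
-        * dma_map_page() call and fan the addresses out per page afterwards.
-        */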
-       for (i = 0; i < tt->ttm.num_pages; ++i) {
-               struct page *p = tt->ttm.pages[i];
-               size_t num_pages = 1;
-
-               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-                       if (++p != tt->ttm.pages[j])
-                               break;
-
-                       ++num_pages;
-               }
-
-               tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
-                                                 0, num_pages * PAGE_SIZE,
-                                                 DMA_BIDIRECTIONAL);
-               if (dma_mapping_error(dev, tt->dma_address[i])) {
-                       while (i--) {
-                               dma_unmap_page(dev, tt->dma_address[i],
-                                              PAGE_SIZE, DMA_BIDIRECTIONAL);
-                               tt->dma_address[i] = 0;
-                       }
-                       ttm_pool_unpopulate(&tt->ttm);
-                       return -EFAULT;
-               }
-
-               for (j = 1; j < num_pages; ++j) {
-                       tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
-                       ++i;
-               }
-       }
-       return 0;
-}
-EXPORT_SYMBOL(ttm_populate_and_map_pages);
-
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-       unsigned i, j;
-
-       for (i = 0; i < tt->ttm.num_pages;) {
-               struct page *p = tt->ttm.pages[i];
-               size_t num_pages = 1;
-
-               if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
-                       ++i;
-                       continue;
-               }
-
-               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-                       if (++p != tt->ttm.pages[j])
-                               break;
-
-                       ++num_pages;
-               }
-
-               dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
-                              DMA_BIDIRECTIONAL);
-
-               i += num_pages;
-       }
-       ttm_pool_unpopulate(&tt->ttm);
-}
-EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
-{
-       struct ttm_page_pool *p;
-       unsigned i;
-       char *h[] = {"pool", "refills", "pages freed", "size"};
-       if (!_manager) {
-               seq_printf(m, "No pool allocator running.\n");
-               return 0;
-       }
-       seq_printf(m, "%7s %12s %13s %8s\n",
-                       h[0], h[1], h[2], h[3]);
-       for (i = 0; i < NUM_POOLS; ++i) {
-               p = &_manager->pools[i];
-
-               seq_printf(m, "%7s %12ld %13ld %8d\n",
-                               p->name, p->nrefills,
-                               p->nfrees, p->npages);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
deleted file mode 100644 (file)
index 5e2df11..0000000
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- * Copyright 2011 (c) Oracle Corp.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-/*
- * A simple DMA pool loosely based on dmapool.c. It has certain advantages
- * over the DMA pools:
- * - Pool collects recently freed pages for reuse (and hooks up to
- *   the shrinker).
- * - Tracks pages currently in use.
- * - Tracks whether the page is UC, WB or cached (and reverts to WB
- *   when freed).
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
-
-#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION               4
-#define FREE_ALL_PAGES                 (~0U)
-#define VADDR_FLAG_HUGE_POOL           1UL
-#define VADDR_FLAG_UPDATED_COUNT       2UL
-
-enum pool_type {
-       IS_UNDEFINED    = 0,
-       IS_WC           = 1 << 1,
-       IS_UC           = 1 << 2,
-       IS_CACHED       = 1 << 3,
-       IS_DMA32        = 1 << 4,
-       IS_HUGE         = 1 << 5
-};
-
-/*
- * The pool structure. There are up to nine pools:
- *  - generic (not restricted to DMA32):
- *      - write combined, uncached, cached.
- *  - dma32 (up to 2^32 - so up to 4GB):
- *      - write combined, uncached, cached.
- *  - huge (not restricted to DMA32):
- *      - write combined, uncached, cached.
- * for each 'struct device'. The 'cached' is for pages that are actively used.
- * The other ones can be shrunk by the shrinker API if necessary.
- * @pools: The 'struct device->dma_pools' link.
- * @type: Type of the pool.
- * @lock: Protects the free_list from concurrent access. Must be
- * used with irqsave/irqrestore variants because the pool allocator may be
- * called from delayed work.
- * @free_list: Pool of pages that are free to be used. No order requirements.
- * @dev: The device that is associated with these pools.
- * @size: Size used during DMA allocation.
- * @npages_free: Count of available pages for re-use.
- * @npages_in_use: Count of pages that are in use.
- * @nfrees: Stats when pool is shrinking.
- * @nrefills: Stats when the pool is grown.
- * @gfp_flags: Flags to pass for alloc_page.
- * @name: Name of the pool.
- * @dev_name: Name derived from dev - similar to how dev_info works.
- *   Used during shutdown as the dev_info during release is unavailable.
- */
-struct dma_pool {
-       struct list_head pools; /* The 'struct device->dma_pools' link */
-       enum pool_type type;
-       spinlock_t lock;
-       struct list_head free_list;
-       struct device *dev;
-       unsigned size;
-       unsigned npages_free;
-       unsigned npages_in_use;
-       unsigned long nfrees; /* Stats when shrunk. */
-       unsigned long nrefills; /* Stats when grown. */
-       gfp_t gfp_flags;
-       char name[13]; /* "cached dma32" */
-       char dev_name[64]; /* Constructed from dev */
-};
-
-/*
- * The accounting structure keeping track of an allocated page along with
- * its DMA address.
- * @page_list: The link to the 'free_list' in 'struct dma_pool'.
- * @vaddr: The virtual address of the page, plus a flag if the page belongs
- * to a huge pool.
- * @p: The 'struct page' backing this allocation.
- * @dma: The bus address of the page. If the page is not allocated
- *   via the DMA API, it will be -1.
- */
-struct dma_page {
-       struct list_head page_list;
-       unsigned long vaddr;
-       struct page *p;
-       dma_addr_t dma;
-};
-
-/*
- * Limits for the pool. They are handled without locks because the only place
- * where they may change is the sysfs store. Changes won't have immediate
- * effect anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
-       unsigned        alloc_size;
-       unsigned        max_size;
-       unsigned        small;
-};
-
-/*
- * Contains the list of all of the 'struct device' and their corresponding
- * DMA pools. Guarded by _manager->lock.
- * @pools: The link to 'struct ttm_pool_manager->pools'
- * @dev: The 'struct device' associated with the 'pool'
- * @pool: The 'struct dma_pool' associated with the 'dev'
- */
-struct device_pools {
-       struct list_head pools;
-       struct device *dev;
-       struct dma_pool *pool;
-};
-
-/*
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * @lock: Lock used when adding/removing from pools
- * @pools: List of 'struct device' and 'struct dma_pool' tuples.
- * @options: Limits for the pool.
- * @npools: Total amount of pools in existence.
- * @mm_shrink: The structure used by [un|]register_shrinker
- * @kobj: kobject used to expose the pool limits in sysfs
- */
-struct ttm_pool_manager {
-       struct mutex            lock;
-       struct list_head        pools;
-       struct ttm_pool_opts    options;
-       unsigned                npools;
-       struct shrinker         mm_shrink;
-       struct kobject          kobj;
-};
-
-static struct ttm_pool_manager *_manager;
-
-static struct attribute ttm_page_pool_max = {
-       .name = "pool_max_size",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
-       .name = "pool_small_allocation",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
-       .name = "pool_allocation_size",
-       .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
-       &ttm_page_pool_max,
-       &ttm_page_pool_small,
-       &ttm_page_pool_alloc_size,
-       NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
-                             const char *buffer, size_t size)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       int chars;
-       unsigned val;
-
-       chars = sscanf(buffer, "%u", &val);
-       if (chars == 0)
-               return size;
-
-       /* Convert kb to number of pages */
-       val = val / (PAGE_SIZE >> 10);
-
-       if (attr == &ttm_page_pool_max) {
-               m->options.max_size = val;
-       } else if (attr == &ttm_page_pool_small) {
-               m->options.small = val;
-       } else if (attr == &ttm_page_pool_alloc_size) {
-               if (val > NUM_PAGES_TO_ALLOC*8) {
-                       pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
-                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-                       return size;
-               } else if (val > NUM_PAGES_TO_ALLOC) {
-                       pr_warn("Setting allocation size to larger than %lu is not recommended\n",
-                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-               }
-               m->options.alloc_size = val;
-       }
-
-       return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
-                            char *buffer)
-{
-       struct ttm_pool_manager *m =
-               container_of(kobj, struct ttm_pool_manager, kobj);
-       unsigned val = 0;
-
-       if (attr == &ttm_page_pool_max)
-               val = m->options.max_size;
-       else if (attr == &ttm_page_pool_small)
-               val = m->options.small;
-       else if (attr == &ttm_page_pool_alloc_size)
-               val = m->options.alloc_size;
-
-       val = val * (PAGE_SIZE >> 10);
-
-       return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
-       .show = &ttm_pool_show,
-       .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
-       .release = &ttm_pool_kobj_release,
-       .sysfs_ops = &ttm_pool_sysfs_ops,
-       .default_attrs = ttm_pool_attrs,
-};
-
-static int ttm_set_pages_caching(struct dma_pool *pool,
-                                struct page **pages, unsigned cpages)
-{
-       int r = 0;
-       /* Set page caching */
-       if (pool->type & IS_UC) {
-               r = ttm_set_pages_array_uc(pages, cpages);
-               if (r)
-                       pr_err("%s: Failed to set %d pages to uc!\n",
-                              pool->dev_name, cpages);
-       }
-       if (pool->type & IS_WC) {
-               r = ttm_set_pages_array_wc(pages, cpages);
-               if (r)
-                       pr_err("%s: Failed to set %d pages to wc!\n",
-                              pool->dev_name, cpages);
-       }
-       return r;
-}
-
-static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
-{
-       unsigned long attrs = 0;
-       dma_addr_t dma = d_page->dma;
-       d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
-       if (pool->type & IS_HUGE)
-               attrs = DMA_ATTR_NO_WARN;
-
-       dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
-
-       kfree(d_page);
-}
-
-static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
-{
-       struct dma_page *d_page;
-       unsigned long attrs = 0;
-       void *vaddr;
-
-       d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
-       if (!d_page)
-               return NULL;
-
-       if (pool->type & IS_HUGE)
-               attrs = DMA_ATTR_NO_WARN;
-
-       vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
-                               pool->gfp_flags, attrs);
-       if (vaddr) {
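-               /* dma_alloc_attrs() may return a remapped (vmalloc) buffer;
-                * resolve the backing struct page either way.
-                */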
-               if (is_vmalloc_addr(vaddr))
-                       d_page->p = vmalloc_to_page(vaddr);
-               else
-                       d_page->p = virt_to_page(vaddr);
-               d_page->vaddr = (unsigned long)vaddr;
-               if (pool->type & IS_HUGE)
-                       d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
-       } else {
-               kfree(d_page);
-               d_page = NULL;
-       }
-       return d_page;
-}
-
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
-{
-       enum pool_type type = IS_UNDEFINED;
-
-       if (flags & TTM_PAGE_FLAG_DMA32)
-               type |= IS_DMA32;
-       if (cstate == tt_cached)
-               type |= IS_CACHED;
-       else if (cstate == tt_uncached)
-               type |= IS_UC;
-       else
-               type |= IS_WC;
-
-       return type;
-}
-
-static void ttm_pool_update_free_locked(struct dma_pool *pool,
-                                       unsigned freed_pages)
-{
-       pool->npages_free -= freed_pages;
-       pool->nfrees += freed_pages;
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
-       struct page *page = d_page->p;
-       unsigned num_pages;
-
-       /* Don't set WB on WB page pool. */
-       if (!(pool->type & IS_CACHED)) {
-               num_pages = pool->size / PAGE_SIZE;
-               if (ttm_set_pages_wb(page, num_pages))
-                       pr_err("%s: Failed to set %d pages to wb!\n",
-                              pool->dev_name, num_pages);
-       }
-
-       list_del(&d_page->page_list);
-       __ttm_dma_free_page(pool, d_page);
-}
-
-static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
-                             struct page *pages[], unsigned npages)
-{
-       struct dma_page *d_page, *tmp;
-
-       if (pool->type & IS_HUGE) {
-               list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
-                       ttm_dma_page_put(pool, d_page);
-
-               return;
-       }
-
-       /* Don't set WB on WB page pool. */
-       if (npages && !(pool->type & IS_CACHED) &&
-           ttm_set_pages_array_wb(pages, npages))
-               pr_err("%s: Failed to set %d pages to wb!\n",
-                      pool->dev_name, npages);
-
-       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
-               list_del(&d_page->page_list);
-               __ttm_dma_free_page(pool, d_page);
-       }
-}
-
-/*
- * Free pages from the pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * pages in one go.
- *
- * @pool: the pool to free pages from
- * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
- * @use_static: safe to use the static buffer
- **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
-                                      bool use_static)
-{
-       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
-       unsigned long irq_flags;
-       struct dma_page *dma_p, *tmp;
-       struct page **pages_to_free;
-       struct list_head d_pages;
-       unsigned freed_pages = 0,
-                npages_to_free = nr_free;
-
-       if (NUM_PAGES_TO_ALLOC < nr_free)
-               npages_to_free = NUM_PAGES_TO_ALLOC;
-
-       if (use_static)
-               pages_to_free = static_buf;
-       else
-               pages_to_free = kmalloc_array(npages_to_free,
-                                             sizeof(struct page *),
-                                             GFP_KERNEL);
-
-       if (!pages_to_free) {
-               pr_debug("%s: Failed to allocate memory for pool free operation\n",
-                      pool->dev_name);
-               return 0;
-       }
-       INIT_LIST_HEAD(&d_pages);
-restart:
-       spin_lock_irqsave(&pool->lock, irq_flags);
-
-       /* We're picking the oldest ones off the list */
-       list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
-                                        page_list) {
-               if (freed_pages >= npages_to_free)
-                       break;
-
-               /* Move the dma_page from one list to another. */
-               list_move(&dma_p->page_list, &d_pages);
-
-               pages_to_free[freed_pages++] = dma_p->p;
-               /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
-               if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
-                       ttm_pool_update_free_locked(pool, freed_pages);
-                       /**
-                        * Because changing page caching is costly
-                        * we unlock the pool to prevent stalling.
-                        */
-                       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-                       ttm_dma_pages_put(pool, &d_pages, pages_to_free,
-                                         freed_pages);
-
-                       INIT_LIST_HEAD(&d_pages);
-
-                       if (likely(nr_free != FREE_ALL_PAGES))
-                               nr_free -= freed_pages;
-
-                       if (NUM_PAGES_TO_ALLOC >= nr_free)
-                               npages_to_free = nr_free;
-                       else
-                               npages_to_free = NUM_PAGES_TO_ALLOC;
-
-                       freed_pages = 0;
-
-                       /* free all so restart the processing */
-                       if (nr_free)
-                               goto restart;
-
-                       /* Not allowed to fall through or break because
-                        * following context is inside spinlock while we are
-                        * outside here.
-                        */
-                       goto out;
-
-               }
-       }
-
-       /* remove range of pages from the pool */
-       if (freed_pages) {
-               ttm_pool_update_free_locked(pool, freed_pages);
-               nr_free -= freed_pages;
-       }
-
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-       if (freed_pages)
-               ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
-out:
-       if (pages_to_free != static_buf)
-               kfree(pages_to_free);
-       return nr_free;
-}
-
-static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
-{
-       struct device_pools *p;
-       struct dma_pool *pool;
-
-       if (!dev)
-               return;
-
-       mutex_lock(&_manager->lock);
-       list_for_each_entry_reverse(p, &_manager->pools, pools) {
-               if (p->dev != dev)
-                       continue;
-               pool = p->pool;
-               if (pool->type != type)
-                       continue;
-
-               list_del(&p->pools);
-               kfree(p);
-               _manager->npools--;
-               break;
-       }
-       list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
-               if (pool->type != type)
-                       continue;
-               /* Takes a spinlock.. */
-               /* OK to use static buffer since global mutex is held. */
-               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
-               WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
-               /* This code path is called after _all_ references to the
-                * struct device has been dropped - so nobody should be
-                * touching it. In case somebody is trying to _add_ we are
-                * guarded by the mutex. */
-               list_del(&pool->pools);
-               kfree(pool);
-               break;
-       }
-       mutex_unlock(&_manager->lock);
-}
-
-/*
- * This destructor is run when the 'struct device' is freed, although the
- * pool might have already been freed earlier.
- */
-static void ttm_dma_pool_release(struct device *dev, void *res)
-{
-       struct dma_pool *pool = *(struct dma_pool **)res;
-
-       if (pool)
-               ttm_dma_free_pool(dev, pool->type);
-}
-
-static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
-{
-       return *(struct dma_pool **)res == match_data;
-}
-
-static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
-                                         enum pool_type type)
-{
-       const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
-       enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
-       struct device_pools *sec_pool = NULL;
-       struct dma_pool *pool = NULL, **ptr;
-       unsigned i;
-       int ret = -ENODEV;
-       char *p;
-
-       if (!dev)
-               return NULL;
-
-       ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
-       if (!ptr)
-               return NULL;
-
-       ret = -ENOMEM;
-
-       pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
-                           dev_to_node(dev));
-       if (!pool)
-               goto err_mem;
-
-       sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
-                               dev_to_node(dev));
-       if (!sec_pool)
-               goto err_mem;
-
-       INIT_LIST_HEAD(&sec_pool->pools);
-       sec_pool->dev = dev;
-       sec_pool->pool =  pool;
-
-       INIT_LIST_HEAD(&pool->free_list);
-       INIT_LIST_HEAD(&pool->pools);
-       spin_lock_init(&pool->lock);
-       pool->dev = dev;
-       pool->npages_free = pool->npages_in_use = 0;
-       pool->nfrees = 0;
-       pool->gfp_flags = flags;
-       if (type & IS_HUGE)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               pool->size = HPAGE_PMD_SIZE;
-#else
-               BUG();
-#endif
-       else
-               pool->size = PAGE_SIZE;
-       pool->type = type;
-       pool->nrefills = 0;
-       p = pool->name;
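-       /* Compose the pool name from its type bits, e.g. "cached dma32". */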
-       for (i = 0; i < ARRAY_SIZE(t); i++) {
-               if (type & t[i]) {
-                       p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
-                                     "%s", n[i]);
-               }
-       }
-       *p = 0;
-       /* We copy the name for pr_* calls because by the time dma_pool_destroy
-        * is called the kobj->name has already been deallocated. */
-       snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
-                dev_driver_string(dev), dev_name(dev));
-       mutex_lock(&_manager->lock);
-       /* You can get the dma_pool from either the global: */
-       list_add(&sec_pool->pools, &_manager->pools);
-       _manager->npools++;
-       /* or from 'struct device': */
-       list_add(&pool->pools, &dev->dma_pools);
-       mutex_unlock(&_manager->lock);
-
-       *ptr = pool;
-       devres_add(dev, ptr);
-
-       return pool;
-err_mem:
-       devres_free(ptr);
-       kfree(sec_pool);
-       kfree(pool);
-       return ERR_PTR(ret);
-}
-
-static struct dma_pool *ttm_dma_find_pool(struct device *dev,
-                                         enum pool_type type)
-{
-       struct dma_pool *pool, *tmp;
-
-       if (type == IS_UNDEFINED)
-               return NULL;
-
-       /* NB: We iterate on the 'struct device' which has no spinlock, but
-        * it does have a kref which we have taken. The kref is taken during
-        * graphic driver loading - in the drm_pci_init it calls either
-        * pci_dev_get or pci_register_driver which both end up taking a kref
-        * on 'struct device'.
-        *
-        * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
-        * and calls the devres destructors: ttm_dma_pool_release. The nice
-        * thing is at that point of time there are no pages associated with the
-        * driver so this function will not be called.
-        */
-       list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
-               if (pool->type == type)
-                       return pool;
-       return NULL;
-}
-
-/*
- * Free the pages that failed to change their caching state. Pages that
- * have already changed their caching state are put back into the
- * pool.
- */
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
-                                                struct list_head *d_pages,
-                                                struct page **failed_pages,
-                                                unsigned cpages)
-{
-       struct dma_page *d_page, *tmp;
-       struct page *p;
-       unsigned i = 0;
-
-       p = failed_pages[0];
-       if (!p)
-               return;
-       /* Find the failed page. */
-       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
-               if (d_page->p != p)
-                       continue;
-               /* .. and then progress over the full list. */
-               list_del(&d_page->page_list);
-               __ttm_dma_free_page(pool, d_page);
-               if (++i < cpages)
-                       p = failed_pages[i];
-               else
-                       break;
-       }
-
-}
-
-/*
- * Allocate 'count' pages, and put 'need' number of them on the
- * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset.
- * The full list of pages should also be on 'd_pages'.
- * We return zero for success, and negative numbers as errors.
- */
-static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
-                                       struct list_head *d_pages,
-                                       unsigned count)
-{
-       struct page **caching_array;
-       struct dma_page *dma_p;
-       struct page *p;
-       int r = 0;
-       unsigned i, j, npages, cpages;
-       unsigned max_cpages = min(count,
-                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
-       /* allocate array for page caching change */
-       caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
-                                     GFP_KERNEL);
-
-       if (!caching_array) {
-               pr_debug("%s: Unable to allocate table for new pages\n",
-                      pool->dev_name);
-               return -ENOMEM;
-       }
-
-       if (count > 1)
-               pr_debug("%s: (%s:%d) Getting %d pages\n",
-                        pool->dev_name, pool->name, current->pid, count);
-
-       for (i = 0, cpages = 0; i < count; ++i) {
-               dma_p = __ttm_dma_alloc_page(pool);
-               if (!dma_p) {
-                       pr_debug("%s: Unable to get page %u\n",
-                                pool->dev_name, i);
-
-                       /* store already allocated pages in the pool after
-                        * setting the caching state */
-                       if (cpages) {
-                               r = ttm_set_pages_caching(pool, caching_array,
-                                                         cpages);
-                               if (r)
-                                       ttm_dma_handle_caching_state_failure(
-                                               pool, d_pages, caching_array,
-                                               cpages);
-                       }
-                       r = -ENOMEM;
-                       goto out;
-               }
-               p = dma_p->p;
-               list_add(&dma_p->page_list, d_pages);
-
-#ifdef CONFIG_HIGHMEM
-               /* gfp flags of a highmem page should never be dma32, so
-                * we should be fine in such a case
-                */
-               if (PageHighMem(p))
-                       continue;
-#endif
-
-               npages = pool->size / PAGE_SIZE;
-               for (j = 0; j < npages; ++j) {
-                       caching_array[cpages++] = p + j;
-                       if (cpages == max_cpages) {
-                               /* Note: Cannot hold the spinlock */
-                               r = ttm_set_pages_caching(pool, caching_array,
-                                                         cpages);
-                               if (r) {
-                                       ttm_dma_handle_caching_state_failure(
-                                            pool, d_pages, caching_array,
-                                            cpages);
-                                       goto out;
-                               }
-                               cpages = 0;
-                       }
-               }
-       }
-
-       if (cpages) {
-               r = ttm_set_pages_caching(pool, caching_array, cpages);
-               if (r)
-                       ttm_dma_handle_caching_state_failure(pool, d_pages,
-                                       caching_array, cpages);
-       }
-out:
-       kfree(caching_array);
-       return r;
-}
-
-/*
- * @return count of pages still required to fulfill the request.
- */
-static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
-                                        unsigned long *irq_flags)
-{
-       unsigned count = _manager->options.small;
-       int r = pool->npages_free;
-
-       if (count > pool->npages_free) {
-               struct list_head d_pages;
-
-               INIT_LIST_HEAD(&d_pages);
-
-               spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
-               /* Returns how many more are necessary to fulfill the
-                * request. */
-               r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
-
-               spin_lock_irqsave(&pool->lock, *irq_flags);
-               if (!r) {
-                       /* Add the fresh pages to the end. */
-                       list_splice(&d_pages, &pool->free_list);
-                       ++pool->nrefills;
-                       pool->npages_free += count;
-                       r = count;
-               } else {
-                       struct dma_page *d_page;
-                       unsigned cpages = 0;
-
-                       pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
-                                pool->dev_name, pool->name, r);
-
-                       list_for_each_entry(d_page, &d_pages, page_list) {
-                               cpages++;
-                       }
-                       list_splice_tail(&d_pages, &pool->free_list);
-                       pool->npages_free += cpages;
-                       r = cpages;
-               }
-       }
-       return r;
-}
-
-/*
- * The populate list is actually a stack (not that it matters, as TTM
- * allocates one page at a time).
- * Returns a dma_page pointer on success, otherwise NULL.
- */
-static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
-                                 struct ttm_dma_tt *ttm_dma,
-                                 unsigned index)
-{
-       struct dma_page *d_page = NULL;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-       unsigned long irq_flags;
-       int count;
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
-       if (count) {
-               d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
-               ttm->pages[index] = d_page->p;
-               ttm_dma->dma_address[index] = d_page->dma;
-               list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
-               pool->npages_in_use += 1;
-               pool->npages_free -= 1;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-       return d_page;
-}
-
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
-{
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-       gfp_t gfp_flags;
-
-       if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
-               gfp_flags = GFP_USER | GFP_DMA32;
-       else
-               gfp_flags = GFP_HIGHUSER;
-       if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-               gfp_flags |= __GFP_ZERO;
-
-       if (huge) {
-               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
-                       __GFP_KSWAPD_RECLAIM;
-               gfp_flags &= ~__GFP_MOVABLE;
-               gfp_flags &= ~__GFP_COMP;
-       }
-
-       if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-               gfp_flags |= __GFP_RETRY_MAYFAIL;
-
-       return gfp_flags;
-}
-
-/*
- * On success pages list will hold count number of correctly
- * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
- */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
-                       struct ttm_operation_ctx *ctx)
-{
-       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-       unsigned long num_pages = ttm->num_pages;
-       struct dma_pool *pool;
-       struct dma_page *d_page;
-       enum pool_type type;
-       unsigned i;
-       int ret;
-
-       if (ttm_tt_is_populated(ttm))
-               return 0;
-
-       if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
-       i = 0;
-
-       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
-               goto skip_huge;
-
-       pool = ttm_dma_find_pool(dev, type | IS_HUGE);
-       if (!pool) {
-               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
-
-               pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
-               if (IS_ERR_OR_NULL(pool))
-                       goto skip_huge;
-       }
-
-       while (num_pages >= HPAGE_PMD_NR) {
-               unsigned j;
-
-               d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-               if (!d_page)
-                       break;
-
-               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               pool->size, ctx);
-               if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
-                       return -ENOMEM;
-               }
-
-               d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
-               for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
-                       ttm->pages[j] = ttm->pages[j - 1] + 1;
-                       ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
-                               PAGE_SIZE;
-               }
-
-               i += HPAGE_PMD_NR;
-               num_pages -= HPAGE_PMD_NR;
-       }
-
-skip_huge:
-#endif
-
-       pool = ttm_dma_find_pool(dev, type);
-       if (!pool) {
-               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
-
-               pool = ttm_dma_pool_init(dev, gfp_flags, type);
-               if (IS_ERR_OR_NULL(pool))
-                       return -ENOMEM;
-       }
-
-       while (num_pages) {
-               d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-               if (!d_page) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
-                       return -ENOMEM;
-               }
-
-               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               pool->size, ctx);
-               if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
-                       return -ENOMEM;
-               }
-
-               d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
-               ++i;
-               --num_pages;
-       }
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
-                       return ret;
-               }
-       }
-
-       ttm_tt_set_populated(ttm);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_populate);
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
-       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-       struct dma_pool *pool;
-       struct dma_page *d_page, *next;
-       enum pool_type type;
-       bool is_cached = false;
-       unsigned count, i, npages = 0;
-       unsigned long irq_flags;
-
-       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       pool = ttm_dma_find_pool(dev, type | IS_HUGE);
-       if (pool) {
-               count = 0;
-               list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
-                                        page_list) {
-                       if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
-                               continue;
-
-                       count++;
-                       if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
-                               ttm_mem_global_free_page(mem_glob, d_page->p,
-                                                        pool->size);
-                               d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
-                       }
-                       ttm_dma_page_put(pool, d_page);
-               }
-
-               spin_lock_irqsave(&pool->lock, irq_flags);
-               pool->npages_in_use -= count;
-               pool->nfrees += count;
-               spin_unlock_irqrestore(&pool->lock, irq_flags);
-       }
-#endif
-
-       pool = ttm_dma_find_pool(dev, type);
-       if (!pool)
-               return;
-
-       is_cached = (ttm_dma_find_pool(pool->dev,
-                    ttm_to_type(ttm->page_flags, tt_cached)) == pool);
-
-       /* make sure pages array match list and count number of pages */
-       count = 0;
-       list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
-                                page_list) {
-               ttm->pages[count] = d_page->p;
-               count++;
-
-               if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
-                       ttm_mem_global_free_page(mem_glob, d_page->p,
-                                                pool->size);
-                       d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
-               }
-
-               if (is_cached)
-                       ttm_dma_page_put(pool, d_page);
-       }
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       pool->npages_in_use -= count;
-       if (is_cached) {
-               pool->nfrees += count;
-       } else {
-               pool->npages_free += count;
-               list_splice(&ttm_dma->pages_list, &pool->free_list);
-               /*
-                * Wait to have at least NUM_PAGES_TO_ALLOC pages
-                * to free in order to minimize calls to set_memory_wb().
-                */
-               if (pool->npages_free >= (_manager->options.max_size +
-                                         NUM_PAGES_TO_ALLOC))
-                       npages = pool->npages_free - _manager->options.max_size;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
-       for (i = 0; i < ttm->num_pages; i++) {
-               ttm->pages[i] = NULL;
-               ttm_dma->dma_address[i] = 0;
-       }
-
-       /* shrink pool if necessary (only on !is_cached pools)*/
-       if (npages)
-               ttm_dma_page_pool_free(pool, npages, false);
-       ttm_tt_set_unpopulated(ttm);
-}
-EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
-
-/**
- * Callback for mm to request the pool to reduce the number of pages held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
- * shrinkers
- */
-static unsigned long
-ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-       static unsigned start_pool;
-       unsigned idx = 0;
-       unsigned pool_offset;
-       unsigned shrink_pages = sc->nr_to_scan;
-       struct device_pools *p;
-       unsigned long freed = 0;
-
-       if (list_empty(&_manager->pools))
-               return SHRINK_STOP;
-
-       if (!mutex_trylock(&_manager->lock))
-               return SHRINK_STOP;
-       if (!_manager->npools)
-               goto out;
-       pool_offset = ++start_pool % _manager->npools;
-       list_for_each_entry(p, &_manager->pools, pools) {
-               unsigned nr_free;
-
-               if (!p->dev)
-                       continue;
-               if (shrink_pages == 0)
-                       break;
-               /* Do it in round-robin fashion. */
-               if (++idx < pool_offset)
-                       continue;
-               nr_free = shrink_pages;
-               /* OK to use static buffer since global mutex is held. */
-               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
-               freed += nr_free - shrink_pages;
-
-               pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
-                        p->pool->dev_name, p->pool->name, current->pid,
-                        nr_free, shrink_pages);
-       }
-out:
-       mutex_unlock(&_manager->lock);
-       return freed;
-}
-
-static unsigned long
-ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
-       struct device_pools *p;
-       unsigned long count = 0;
-
-       if (!mutex_trylock(&_manager->lock))
-               return 0;
-       list_for_each_entry(p, &_manager->pools, pools)
-               count += p->pool->npages_free;
-       mutex_unlock(&_manager->lock);
-       return count;
-}
-
-static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
-       manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
-       manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
-       manager->mm_shrink.seeks = 1;
-       return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
-       unregister_shrinker(&manager->mm_shrink);
-}
-
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
-       int ret;
-
-       WARN_ON(_manager);
-
-       pr_info("Initializing DMA pool allocator\n");
-
-       _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
-       if (!_manager)
-               return -ENOMEM;
-
-       mutex_init(&_manager->lock);
-       INIT_LIST_HEAD(&_manager->pools);
-
-       _manager->options.max_size = max_pages;
-       _manager->options.small = SMALL_ALLOCATION;
-       _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
-       /* This takes care of auto-freeing the _manager */
-       ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
-                                  &glob->kobj, "dma_pool");
-       if (unlikely(ret != 0))
-               goto error;
-
-       ret = ttm_dma_pool_mm_shrink_init(_manager);
-       if (unlikely(ret != 0))
-               goto error;
-       return 0;
-
-error:
-       kobject_put(&_manager->kobj);
-       _manager = NULL;
-       return ret;
-}
-
-void ttm_dma_page_alloc_fini(void)
-{
-       struct device_pools *p, *t;
-
-       pr_info("Finalizing DMA pool allocator\n");
-       ttm_dma_pool_mm_shrink_fini(_manager);
-
-       list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
-               dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
-                       current->pid);
-               WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
-                       ttm_dma_pool_match, p->pool));
-               ttm_dma_free_pool(p->dev, p->pool->type);
-       }
-       kobject_put(&_manager->kobj);
-       _manager = NULL;
-}
-
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
-       struct device_pools *p;
-       struct dma_pool *pool = NULL;
-
-       if (!_manager) {
-               seq_printf(m, "No pool allocator running.\n");
-               return 0;
-       }
-       seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
-       mutex_lock(&_manager->lock);
-       list_for_each_entry(p, &_manager->pools, pools) {
-               struct device *dev = p->dev;
-               if (!dev)
-                       continue;
-               pool = p->pool;
-               seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
-                               pool->name, pool->nrefills,
-                               pool->nfrees, pool->npages_in_use,
-                               pool->npages_free,
-                               pool->dev_name);
-       }
-       mutex_unlock(&_manager->lock);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
new file mode 100644 (file)
index 0000000..1e50dee
--- /dev/null
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes of the linear mapping on x86 requires a costly cross-CPU
+ * TLB invalidate for those addresses.
+ *
+ * In addition, allocations from the DMA coherent API are pooled as well,
+ * because they are rather slow compared to alloc_pages+map.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the order in the lower bits
+ */
+struct ttm_pool_dma {
+       dma_addr_t addr;
+       unsigned long vaddr;
+};
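Since dma_alloc_attrs() returns page-aligned addresses, the low PAGE_SHIFT bits of @vaddr are free to carry the allocation order. A minimal sketch of the encode/decode used throughout this file:

	/* encode: stash the order in the otherwise-zero low bits */
	dma->vaddr = (unsigned long)vaddr | order;

	/* decode: split the packed value back into mapping and order */
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	order = dma->vaddr & ~PAGE_MASK;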
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker mm_shrinker;
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+                                       unsigned int order)
+{
+       unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+       struct ttm_pool_dma *dma;
+       struct page *p;
+       void *vaddr;
+
+       if (order) {
+               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+                       __GFP_KSWAPD_RECLAIM;
+               gfp_flags &= ~__GFP_MOVABLE;
+               gfp_flags &= ~__GFP_COMP;
+       }
+
+       if (!pool->use_dma_alloc) {
+               p = alloc_pages(gfp_flags, order);
+               if (p)
+                       p->private = order;
+               return p;
+       }
+
+       dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+       if (!dma)
+               return NULL;
+
+       if (order)
+               attr |= DMA_ATTR_NO_WARN;
+
+       vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+                               &dma->addr, gfp_flags, attr);
+       if (!vaddr)
+               goto error_free;
+
+       /* TODO: This is an illegal abuse of the DMA API, but we need to rework
+        * TTM page fault handling and extend the DMA API to clean this up.
+        */
+       if (is_vmalloc_addr(vaddr))
+               p = vmalloc_to_page(vaddr);
+       else
+               p = virt_to_page(vaddr);
+
+       dma->vaddr = (unsigned long)vaddr | order;
+       p->private = (unsigned long)dma;
+       return p;
+
+error_free:
+       kfree(dma);
+       return NULL;
+}
+
+/* Reset the caching and free pages of size 1 << order */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+                              unsigned int order, struct page *p)
+{
+       unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+       struct ttm_pool_dma *dma;
+       void *vaddr;
+
+#ifdef CONFIG_X86
+       /* We don't care that set_pages_wb is inefficient here. This is only
+        * used when we have to shrink and CPU overhead is irrelevant then.
+        */
+       if (caching != ttm_cached && !PageHighMem(p))
+               set_pages_wb(p, 1 << order);
+#endif
+
+       if (!pool->use_dma_alloc) {
+               __free_pages(p, order);
+               return;
+       }
+
+       if (order)
+               attr |= DMA_ATTR_NO_WARN;
+
+       dma = (void *)p->private;
+       vaddr = (void *)(dma->vaddr & PAGE_MASK);
+       dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+                      attr);
+       kfree(dma);
+}
+
+/* Apply a new caching to an array of pages */
+static int ttm_pool_apply_caching(struct page **first, struct page **last,
+                                 enum ttm_caching caching)
+{
+#ifdef CONFIG_X86
+       unsigned int num_pages = last - first;
+
+       if (!num_pages)
+               return 0;
+
+       switch (caching) {
+       case ttm_cached:
+               break;
+       case ttm_write_combined:
+               return set_pages_array_wc(first, num_pages);
+       case ttm_uncached:
+               return set_pages_array_uc(first, num_pages);
+       }
+#endif
+       return 0;
+}
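ttm_pool_alloc() below keeps a trailing cursor behind the fill cursor so that a whole run of freshly allocated pages is converted with a single set_pages_array_*() call rather than one call per page. A sketch of that windowing (the cursor names follow ttm_pool_alloc(); n is hypothetical):

	struct page **caching = tt->pages;	/* start of unconverted run */
	struct page **pages = tt->pages + n;	/* one past the newest page */

	/* convert [caching, pages) in one batch, then open a new window */
	int r = ttm_pool_apply_caching(caching, pages, tt->caching);
	caching = pages;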
+
+/* Map pages of size 1 << order and fill the DMA address array */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+                       struct page *p, dma_addr_t **dma_addr)
+{
+       dma_addr_t addr;
+       unsigned int i;
+
+       if (pool->use_dma_alloc) {
+               struct ttm_pool_dma *dma = (void *)p->private;
+
+               addr = dma->addr;
+       } else {
+               size_t size = (1ULL << order) * PAGE_SIZE;
+
+               addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(pool->dev, addr))
+                       return -EFAULT;
+       }
+
+       for (i = 1 << order; i ; --i) {
+               *(*dma_addr)++ = addr;
+               addr += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+                          unsigned int num_pages)
+{
+       /* Unmapped while freeing the page */
+       if (pool->use_dma_alloc)
+               return;
+
+       dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+                      DMA_BIDIRECTIONAL);
+}
+
+/* Give a page back to a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+       spin_lock(&pt->lock);
+       list_add(&p->lru, &pt->pages);
+       spin_unlock(&pt->lock);
+       atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take a page from a specific pool_type; return NULL when none is available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+       struct page *p;
+
+       spin_lock(&pt->lock);
+       p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+       if (p) {
+               atomic_long_sub(1 << pt->order, &allocated_pages);
+               list_del(&p->lru);
+       }
+       spin_unlock(&pt->lock);
+
+       return p;
+}
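Together, give/take implement a LIFO freelist chained through page->lru, with allocated_pages as the global accounting counter the shrinker works against. A hypothetical round trip, assuming pt was set up by ttm_pool_type_init():

	struct page *p = alloc_pages(GFP_KERNEL, pt->order);

	if (p) {
		ttm_pool_type_give(pt, p);	/* counted in allocated_pages */
		p = ttm_pool_type_take(pt);	/* LIFO: same page comes back */
	}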
+
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+       unsigned int count = 0;
+       struct page *p;
+
+       spin_lock(&pt->lock);
+       /* Only used for debugfs, the overhead doesn't matter */
+       list_for_each_entry(p, &pt->pages, lru)
+               ++count;
+       spin_unlock(&pt->lock);
+
+       return count;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+                              enum ttm_caching caching, unsigned int order)
+{
+       pt->pool = pool;
+       pt->caching = caching;
+       pt->order = order;
+       spin_lock_init(&pt->lock);
+       INIT_LIST_HEAD(&pt->pages);
+
+       spin_lock(&shrinker_lock);
+       list_add_tail(&pt->shrinker_list, &shrinker_list);
+       spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+       struct page *p, *tmp;
+
+       spin_lock(&shrinker_lock);
+       list_del(&pt->shrinker_list);
+       spin_unlock(&shrinker_lock);
+
+       list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+               ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+                                                 enum ttm_caching caching,
+                                                 unsigned int order)
+{
+       if (pool->use_dma_alloc)
+               return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+       switch (caching) {
+       case ttm_write_combined:
+               return &global_write_combined[order];
+       case ttm_uncached:
+               return &global_uncached[order];
+       default:
+               break;
+       }
+#endif
+
+       return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+       struct ttm_pool_type *pt;
+       unsigned int num_freed;
+       struct page *p;
+
+       spin_lock(&shrinker_lock);
+       pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+
+       p = ttm_pool_type_take(pt);
+       if (p) {
+               ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+               num_freed = 1 << pt->order;
+       } else {
+               num_freed = 0;
+       }
+
+       list_move_tail(&pt->shrinker_list, &shrinker_list);
+       spin_unlock(&shrinker_lock);
+
+       return num_freed;
+}
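Each call frees at most one cached block (1 << pt->order pages) from the pool_type at the head of the list, then rotates that pool_type to the tail, so repeated calls walk all registered pools round-robin. Callers simply loop until a target is reached, for example (target is hypothetical):

	while (atomic_long_read(&allocated_pages) > target)
		ttm_pool_shrink();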
+
+/* Return the allocation order of a page */
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+       if (pool->use_dma_alloc) {
+               struct ttm_pool_dma *dma = (void *)p->private;
+
+               return dma->vaddr & ~PAGE_MASK;
+       }
+
+       return p->private;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+                  struct ttm_operation_ctx *ctx)
+{
+       unsigned long num_pages = tt->num_pages;
+       dma_addr_t *dma_addr = tt->dma_address;
+       struct page **caching = tt->pages;
+       struct page **pages = tt->pages;
+       gfp_t gfp_flags = GFP_USER;
+       unsigned int i, order;
+       struct page *p;
+       int r;
+
+       WARN_ON(!num_pages || ttm_tt_is_populated(tt));
+       WARN_ON(dma_addr && !pool->dev);
+
+       if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               gfp_flags |= __GFP_ZERO;
+
+       if (tt->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+               gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+       if (pool->use_dma32)
+               gfp_flags |= GFP_DMA32;
+       else
+               gfp_flags |= GFP_HIGHUSER;
+
+       for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+            order = min_t(unsigned int, order, __fls(num_pages))) {
+               bool apply_caching = false;
+               struct ttm_pool_type *pt;
+
+               pt = ttm_pool_select_type(pool, tt->caching, order);
+               p = pt ? ttm_pool_type_take(pt) : NULL;
+               if (p) {
+                       apply_caching = true;
+               } else {
+                       p = ttm_pool_alloc_page(pool, gfp_flags, order);
+                       if (p && PageHighMem(p))
+                               apply_caching = true;
+               }
+
+               if (!p) {
+                       if (order) {
+                               --order;
+                               continue;
+                       }
+                       r = -ENOMEM;
+                       goto error_free_all;
+               }
+
+               if (apply_caching) {
+                       r = ttm_pool_apply_caching(caching, pages,
+                                                  tt->caching);
+                       if (r)
+                               goto error_free_page;
+                       caching = pages + (1 << order);
+               }
+
+               r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
+                                             (1 << order) * PAGE_SIZE,
+                                             ctx);
+               if (r)
+                       goto error_free_page;
+
+               if (dma_addr) {
+                       r = ttm_pool_map(pool, order, p, &dma_addr);
+                       if (r)
+                               goto error_global_free;
+               }
+
+               num_pages -= 1 << order;
+               for (i = 1 << order; i; --i)
+                       *(pages++) = p++;
+       }
+
+       r = ttm_pool_apply_caching(caching, pages, tt->caching);
+       if (r)
+               goto error_free_all;
+
+       return 0;
+
+error_global_free:
+       ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
+
+error_free_page:
+       ttm_pool_free_page(pool, tt->caching, order, p);
+
+error_free_all:
+       num_pages = tt->num_pages - num_pages;
+       for (i = 0; i < num_pages; ) {
+               order = ttm_pool_page_order(pool, tt->pages[i]);
+               ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+               i += 1 << order;
+       }
+
+       return r;
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
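A hypothetical driver-side populate hook wired straight to the pool; the default path in ttm_tt_populate() further down does effectively the same thing:

	static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
					 struct ttm_tt *tt,
					 struct ttm_operation_ctx *ctx)
	{
		/* illustrative only; a real driver may bind or pin as well */
		return ttm_pool_alloc(&bdev->pool, tt, ctx);
	}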
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+       unsigned int i;
+
+       for (i = 0; i < tt->num_pages; ) {
+               struct page *p = tt->pages[i];
+               unsigned int order, num_pages;
+               struct ttm_pool_type *pt;
+
+               order = ttm_pool_page_order(pool, p);
+               num_pages = 1ULL << order;
+               ttm_mem_global_free_page(&ttm_mem_glob, p,
+                                        num_pages * PAGE_SIZE);
+               if (tt->dma_address)
+                       ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+
+               pt = ttm_pool_select_type(pool, tt->caching, order);
+               if (pt)
+                       ttm_pool_type_give(pt, tt->pages[i]);
+               else
+                       ttm_pool_free_page(pool, tt->caching, order,
+                                          tt->pages[i]);
+
+               i += num_pages;
+       }
+
+       while (atomic_long_read(&allocated_pages) > page_pool_size)
+               ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
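The matching teardown hook is symmetric; again a hypothetical sketch:

	static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
					    struct ttm_tt *tt)
	{
		/* returns the pages to the per-device or global pools */
		ttm_pool_free(&bdev->pool, tt);
	}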
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @use_dma_alloc: true if coherent DMA alloc should be used
+ * @use_dma32: true if GFP_DMA32 should be used
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+                  bool use_dma_alloc, bool use_dma32)
+{
+       unsigned int i, j;
+
+       WARN_ON(!dev && use_dma_alloc);
+
+       pool->dev = dev;
+       pool->use_dma_alloc = use_dma_alloc;
+       pool->use_dma32 = use_dma32;
+
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+               for (j = 0; j < MAX_ORDER; ++j)
+                       ttm_pool_type_init(&pool->caching[i].orders[j],
+                                          pool, i, j);
+}
+EXPORT_SYMBOL(ttm_pool_init);
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
+ */
+void ttm_pool_fini(struct ttm_pool *pool)
+{
+       unsigned int i, j;
+
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+               for (j = 0; j < MAX_ORDER; ++j)
+                       ttm_pool_type_fini(&pool->caching[i].orders[j]);
+}
+EXPORT_SYMBOL(ttm_pool_fini);
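Taken together, a device wanting coherent DMA allocations would pair these as follows (a sketch; bdev and dev are assumed to come from the driver):

	/* device init: pool backed by dma_alloc_attrs(), no DMA32 limit */
	ttm_pool_init(&bdev->pool, dev, true, false);

	/* ... ttm_pool_alloc()/ttm_pool_free() while the device lives ... */

	/* device teardown: drains every page still cached in the pool */
	ttm_pool_fini(&bdev->pool);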
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Dump information about the different pool types */
+static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+                                   struct seq_file *m)
+{
+       unsigned int i;
+
+       for (i = 0; i < MAX_ORDER; ++i)
+               seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+       seq_puts(m, "\n");
+}
+
+/**
+ * ttm_pool_debugfs - Debugfs dump function for a pool
+ *
+ * @pool: the pool to dump the information for
+ * @m: seq_file to dump to
+ *
+ * Make a debugfs dump with the per-pool and global information.
+ */
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
+{
+       unsigned int i;
+
+       spin_lock(&shrinker_lock);
+
+       seq_puts(m, "\t ");
+       for (i = 0; i < MAX_ORDER; ++i)
+               seq_printf(m, " ---%2u---", i);
+       seq_puts(m, "\n");
+
+       seq_puts(m, "wc\t:");
+       ttm_pool_debugfs_orders(global_write_combined, m);
+       seq_puts(m, "uc\t:");
+       ttm_pool_debugfs_orders(global_uncached, m);
+
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               seq_puts(m, "DMA ");
+               switch (i) {
+               case ttm_cached:
+                       seq_puts(m, "\t:");
+                       break;
+               case ttm_write_combined:
+                       seq_puts(m, "wc\t:");
+                       break;
+               case ttm_uncached:
+                       seq_puts(m, "uc\t:");
+                       break;
+               }
+               ttm_pool_debugfs_orders(pool->caching[i].orders, m);
+       }
+
+       seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+                  atomic_long_read(&allocated_pages), page_pool_size);
+
+       spin_unlock(&shrinker_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_pool_debugfs);
+
+#endif
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+                                           struct shrink_control *sc)
+{
+       unsigned long num_freed = 0;
+
+       do
+               num_freed += ttm_pool_shrink();
+       while (!num_freed && atomic_long_read(&allocated_pages));
+
+       return num_freed;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+                                            struct shrink_control *sc)
+{
+       unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+       return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/**
+ * ttm_pool_mgr_init - Initialize globals
+ *
+ * @num_pages: default number of pages
+ *
+ * Initialize the global locks and lists for the MM shrinker.
+ */
+int ttm_pool_mgr_init(unsigned long num_pages)
+{
+       unsigned int i;
+
+       if (!page_pool_size)
+               page_pool_size = num_pages;
+
+       spin_lock_init(&shrinker_lock);
+       INIT_LIST_HEAD(&shrinker_list);
+
+       for (i = 0; i < MAX_ORDER; ++i) {
+               ttm_pool_type_init(&global_write_combined[i], NULL,
+                                  ttm_write_combined, i);
+               ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+       }
+
+       mm_shrinker.count_objects = ttm_pool_shrinker_count;
+       mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
+       mm_shrinker.seeks = 1;
+       return register_shrinker(&mm_shrinker);
+}
+
+/**
+ * ttm_pool_mgr_fini - Finalize globals
+ *
+ * Cleanup the global pools and unregister the MM shrinker.
+ */
+void ttm_pool_mgr_fini(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < MAX_ORDER; ++i) {
+               ttm_pool_type_fini(&global_write_combined[i]);
+               ttm_pool_type_fini(&global_uncached[i]);
+       }
+
+       unregister_shrinker(&mm_shrinker);
+       WARN_ON(!list_empty(&shrinker_list));
+}
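At the subsystem level the manager calls bracket everything else; a sketch of the expected ordering (the num_pages default is illustrative):

	int ret = ttm_pool_mgr_init(num_pages);	/* registers the shrinker */
	if (ret)
		return ret;

	/* ... per-device ttm_pool_init()/ttm_pool_fini() ... */

	ttm_pool_mgr_fini();	/* drains the global wc/uc pools */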
index 1da0e277c5111492c6e65d12a4c53772a3a1b309..ea77919569a2e7517fe1f0ad2bdf29e25c64c083 100644 (file)
@@ -149,7 +149,7 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev,
 
        ttm_resource_manager_set_used(man, false);
 
-       ret = ttm_resource_manager_force_list_clean(bdev, man);
+       ret = ttm_resource_manager_evict_all(bdev, man);
        if (ret)
                return ret;
 
index b325b9264203ca9bc1daff0da3385f16a5c9dcce..4ebc043e28671b559fdeb007d766d58ece980b2e 100644 (file)
@@ -75,16 +75,16 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 EXPORT_SYMBOL(ttm_resource_manager_init);
 
 /*
- * ttm_resource_manager_force_list_clean
+ * ttm_resource_manager_evict_all
  *
  * @bdev - device to use
  * @man - manager to use
  *
- * Force all the objects out of a memory manager until clean.
+ * Evict all the objects out of a memory manager until it is empty.
  * Part of memory manager cleanup sequence.
  */
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
-                                         struct ttm_resource_manager *man)
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+                                  struct ttm_resource_manager *man)
 {
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
@@ -126,7 +126,7 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
 
        return 0;
 }
-EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
+EXPORT_SYMBOL(ttm_resource_manager_evict_all);
 
 /**
  * ttm_resource_manager_debug
index f43fa69a1e65081448e8eeebf159268209d96697..8861a74ac3351d82c1edf9394a855d23aa98840c 100644 (file)
@@ -37,8 +37,6 @@
 #include <linux/file.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -53,9 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
        if (bo->ttm)
                return 0;
 
-       if (bdev->need_dma32)
-               page_flags |= TTM_PAGE_FLAG_DMA32;
-
        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;
 
@@ -93,21 +88,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
        return 0;
 }
 
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
-                                         sizeof(*ttm->ttm.pages) +
-                                         sizeof(*ttm->dma_address),
-                                         GFP_KERNEL | __GFP_ZERO);
-       if (!ttm->ttm.pages)
+       ttm->pages = kvmalloc_array(ttm->num_pages,
+                                   sizeof(*ttm->pages) +
+                                   sizeof(*ttm->dma_address),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!ttm->pages)
                return -ENOMEM;
-       ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+       ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
        return 0;
 }
 
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+       ttm->dma_address = kvmalloc_array(ttm->num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
@@ -115,104 +111,11 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
        return 0;
 }
 
-static int ttm_tt_set_page_caching(struct page *p,
-                                  enum ttm_caching_state c_old,
-                                  enum ttm_caching_state c_new)
-{
-       int ret = 0;
-
-       if (PageHighMem(p))
-               return 0;
-
-       if (c_old != tt_cached) {
-               /* p isn't in the default caching state, set it to
-                * writeback first to free its current memtype. */
-
-               ret = ttm_set_pages_wb(p, 1);
-               if (ret)
-                       return ret;
-       }
-
-       if (c_new == tt_wc)
-               ret = ttm_set_pages_wc(p, 1);
-       else if (c_new == tt_uncached)
-               ret = ttm_set_pages_uc(p, 1);
-
-       return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
-                             enum ttm_caching_state c_state)
-{
-       int i, j;
-       struct page *cur_page;
-       int ret;
-
-       if (ttm->caching_state == c_state)
-               return 0;
-
-       if (!ttm_tt_is_populated(ttm)) {
-               /* Change caching but don't populate */
-               ttm->caching_state = c_state;
-               return 0;
-       }
-
-       if (ttm->caching_state == tt_cached)
-               drm_clflush_pages(ttm->pages, ttm->num_pages);
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               cur_page = ttm->pages[i];
-               if (likely(cur_page != NULL)) {
-                       ret = ttm_tt_set_page_caching(cur_page,
-                                                     ttm->caching_state,
-                                                     c_state);
-                       if (unlikely(ret != 0))
-                               goto out_err;
-               }
-       }
-
-       ttm->caching_state = c_state;
-
-       return 0;
-
-out_err:
-       for (j = 0; j < i; ++j) {
-               cur_page = ttm->pages[j];
-               if (likely(cur_page != NULL)) {
-                       (void)ttm_tt_set_page_caching(cur_page, c_state,
-                                                     ttm->caching_state);
-               }
-       }
-
-       return ret;
-}
-
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
-{
-       enum ttm_caching_state state;
-
-       if (placement & TTM_PL_FLAG_WC)
-               state = tt_wc;
-       else if (placement & TTM_PL_FLAG_UNCACHED)
-               state = tt_uncached;
-       else
-               state = tt_cached;
-
-       return ttm_tt_set_caching(ttm, state);
-}
-EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-
 void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
        ttm_tt_unpopulate(bdev, ttm);
 
-       if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
-           ttm->swap_storage)
+       if (ttm->swap_storage)
                fput(ttm->swap_storage);
 
        ttm->swap_storage = NULL;
@@ -226,20 +129,22 @@ void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 
 static void ttm_tt_init_fields(struct ttm_tt *ttm,
                               struct ttm_buffer_object *bo,
-                              uint32_t page_flags)
+                              uint32_t page_flags,
+                              enum ttm_caching caching)
 {
        ttm->num_pages = bo->num_pages;
-       ttm->caching_state = tt_cached;
+       ttm->caching = ttm_cached;
        ttm->page_flags = page_flags;
-       ttm_tt_set_unpopulated(ttm);
+       ttm->dma_address = NULL;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
+       ttm->caching = caching;
 }
 
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-               uint32_t page_flags)
+               uint32_t page_flags, enum ttm_caching caching)
 {
-       ttm_tt_init_fields(ttm, bo, page_flags);
+       ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
        if (ttm_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
@@ -251,20 +156,21 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-       kvfree(ttm->pages);
+       if (ttm->pages)
+               kvfree(ttm->pages);
+       else
+               kvfree(ttm->dma_address);
        ttm->pages = NULL;
+       ttm->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
 
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-                   uint32_t page_flags)
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+                   uint32_t page_flags, enum ttm_caching caching)
 {
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-
-       ttm_tt_init_fields(ttm, bo, page_flags);
+       ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
-       if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+       if (ttm_dma_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
@@ -272,19 +178,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-                  uint32_t page_flags)
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+                  uint32_t page_flags, enum ttm_caching caching)
 {
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;
 
-       ttm_tt_init_fields(ttm, bo, page_flags);
+       ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
-               ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+               ret = ttm_sg_tt_alloc_page_directory(ttm);
        else
-               ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+               ret = ttm_dma_tt_alloc_page_directory(ttm);
        if (ret) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
@@ -293,92 +197,73 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_sg_tt_init);
 
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-
-       if (ttm->pages)
-               kvfree(ttm->pages);
-       else
-               kvfree(ttm_dma->dma_address);
-       ttm->pages = NULL;
-       ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
-       int i;
-       int ret = -ENOMEM;
+       gfp_t gfp_mask;
+       int i, ret;
 
        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);
 
        swap_space = swap_storage->f_mapping;
+       gfp_mask = mapping_gfp_mask(swap_space);
+       if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+               gfp_mask |= __GFP_RETRY_MAYFAIL;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
-               gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
-               from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
-
+               from_page = shmem_read_mapping_page_gfp(swap_space, i,
+                                                       gfp_mask);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
-               if (unlikely(to_page == NULL))
+               if (unlikely(to_page == NULL)) {
+                       ret = -ENOMEM;
                        goto out_err;
+               }
 
                copy_highpage(to_page, from_page);
                put_page(from_page);
        }
 
-       if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
-               fput(swap_storage);
+       fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
 
        return 0;
+
 out_err:
        return ret;
 }
 
-int ttm_tt_swapout(struct ttm_bo_device *bdev,
-                  struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
-       int i;
-       int ret = -ENOMEM;
-
-       BUG_ON(ttm->caching_state != tt_cached);
-
-       if (!persistent_swap_storage) {
-               swap_storage = shmem_file_setup("ttm swap",
-                                               ttm->num_pages << PAGE_SHIFT,
-                                               0);
-               if (IS_ERR(swap_storage)) {
-                       pr_err("Failed allocating swap storage\n");
-                       return PTR_ERR(swap_storage);
-               }
-       } else {
-               swap_storage = persistent_swap_storage;
+       gfp_t gfp_mask;
+       int i, ret;
+
+       swap_storage = shmem_file_setup("ttm swap",
+                                       ttm->num_pages << PAGE_SHIFT,
+                                       0);
+       if (IS_ERR(swap_storage)) {
+               pr_err("Failed allocating swap storage\n");
+               return PTR_ERR(swap_storage);
        }
 
        swap_space = swap_storage->f_mapping;
+       gfp_mask = mapping_gfp_mask(swap_space);
+       if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+               gfp_mask |= __GFP_RETRY_MAYFAIL;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
-               gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
-
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
@@ -397,13 +282,11 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev,
        ttm_tt_unpopulate(bdev, ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
-       if (persistent_swap_storage)
-               ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
 
        return 0;
+
 out_err:
-       if (!persistent_swap_storage)
-               fput(swap_storage);
+       fput(swap_storage);
 
        return ret;
 }
@@ -433,10 +316,21 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
        if (bdev->driver->ttm_tt_populate)
                ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
        else
-               ret = ttm_pool_populate(ttm, ctx);
-       if (!ret)
-               ttm_tt_add_mapping(bdev, ttm);
-       return ret;
+               ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+       if (ret)
+               return ret;
+
+       ttm_tt_add_mapping(bdev, ttm);
+       ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0)) {
+                       ttm_tt_unpopulate(bdev, ttm);
+                       return ret;
+               }
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(ttm_tt_populate);
 
@@ -464,5 +358,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
        if (bdev->driver->ttm_tt_unpopulate)
                bdev->driver->ttm_tt_unpopulate(bdev, ttm);
        else
-               ttm_pool_unpopulate(ttm);
+               ttm_pool_free(&bdev->pool, ttm);
+       ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
 }
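
With ttm_pool_populate()/ttm_pool_unpopulate() gone, the default path allocates straight from the per-device page pool, and the ttm_tt_populate() wrapper now owns the mapping bookkeeping, the TTM_PAGE_FLAG_PRIV_POPULATED flag and the deferred swapin. A minimal sketch of driver hooks under the new contract, with hypothetical mydrv_* names:

/* Sketch only: a driver with no special setup can delegate directly to
 * the device pool. The core wrapper adds the mapping, sets
 * TTM_PAGE_FLAG_PRIV_POPULATED and performs any pending swapin. */
static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
                                 struct ttm_tt *ttm,
                                 struct ttm_operation_ctx *ctx)
{
        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
                                    struct ttm_tt *ttm)
{
        ttm_pool_free(&bdev->pool, ttm);
}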
index c3aa39bd38ecdf81d5d9c3cc2d4bc6869f473858..b5259cb1383fc22be0ce43c7a912c289187d7d05 100644 (file)
@@ -200,8 +200,8 @@ static int tve200_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq) {
-               ret = -EINVAL;
+       if (irq < 0) {
+               ret = irq;
                goto clk_disable;
        }
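
platform_get_irq() signals failure with a negative errno (including -EPROBE_DEFER), and a valid interrupt here is always positive, so the old `if (!irq)` test both missed real failures and replaced the errno with -EINVAL; checking `irq < 0` and propagating the return value is the correct idiom.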
 
index 4fcc0a542b8a65ba54ab369f488402f402be1cd0..931c55126148be0feab045261d5d259637a78c72 100644 (file)
@@ -213,12 +213,12 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
 }
 
 static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_crtc_state)
+                                   struct drm_atomic_state *state)
 {
 }
 
 static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_crtc_state)
+                                    struct drm_atomic_state *state)
 {
 }
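
These empty hooks track the tree-wide change of the CRTC atomic_enable/atomic_disable prototypes: callbacks now receive the whole drm_atomic_state and look up whichever per-object state they need, as the vc4 and virtio conversions below do. The pattern, sketched with a hypothetical foo_ driver:

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
                                   struct drm_atomic_state *state)
{
        /* Both old and new CRTC state hang off the commit-wide state. */
        struct drm_crtc_state *old_state =
                drm_atomic_get_old_crtc_state(state, crtc);
        struct drm_crtc_state *new_state =
                drm_atomic_get_new_crtc_state(state, crtc);

        /* Program the hardware from new_state; consult old_state only
         * where the transition itself matters. */
}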
 
index 74ceebd62fbce4aa131ff6444bfc912c630ec7c9..f432278173cd315bf76f232aa2096cb11f5c0eae 100644 (file)
@@ -21,6 +21,8 @@
 #include "vc4_drv.h"
 #include "uapi/drm/vc4_drm.h"
 
+static vm_fault_t vc4_fault(struct vm_fault *vmf);
+
 static const char * const bo_type_names[] = {
        "kernel",
        "V3D",
@@ -374,6 +376,21 @@ out:
        return bo;
 }
 
+static const struct vm_operations_struct vc4_vm_ops = {
+       .fault = vc4_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+       .free = vc4_free_object,
+       .export = vc4_prime_export,
+       .get_sg_table = drm_gem_cma_prime_get_sg_table,
+       .vmap = vc4_prime_vmap,
+       .vunmap = drm_gem_cma_prime_vunmap,
+       .vm_ops = &vc4_vm_ops,
+};
+
 /**
  * vc4_gem_create_object - Implementation of driver->gem_create_object.
  * @dev: DRM device
@@ -400,6 +417,8 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
        vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
        mutex_unlock(&vc4->bo_lock);
 
+       bo->base.base.funcs = &vc4_gem_object_funcs;
+
        return &bo->base.base;
 }
 
@@ -684,7 +703,7 @@ struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
        return dmabuf;
 }
 
-vm_fault_t vc4_fault(struct vm_fault *vmf)
+static vm_fault_t vc4_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
index 482219fb4db2146fe042700fdddbc3e17c03088a..f04f5cc8c839d652b95752c8045f45f5af56e19a 100644 (file)
@@ -472,8 +472,10 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
 }
 
 static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
        struct drm_device *dev = crtc->dev;
 
@@ -499,8 +501,10 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct drm_device *dev = crtc->dev;
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
        struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
index a17aa1db11b6f7ee85a2eb5b717ca8c593c53d8f..8f10f609e4f8350d472b5d9f14b91ce1ffd7bd80 100644 (file)
@@ -140,12 +140,6 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
        kfree(vc4file);
 }
 
-static const struct vm_operations_struct vc4_vm_ops = {
-       .fault = vc4_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations vc4_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@ -195,16 +189,10 @@ static struct drm_driver vc4_drm_driver = {
 #endif
 
        .gem_create_object = vc4_create_object,
-       .gem_free_object_unlocked = vc4_free_object,
-       .gem_vm_ops = &vc4_vm_ops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = vc4_prime_export,
-       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = vc4_prime_import_sg_table,
-       .gem_prime_vmap = vc4_prime_vmap,
-       .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap = vc4_prime_mmap,
 
        .dumb_create = vc4_dumb_create,
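
GEM object callbacks (free, export, vmap, vm_ops and the prime helpers) move out of struct drm_driver and into a const drm_gem_object_funcs that every object points at, so the core dispatches through obj->funcs; vgem receives the same conversion further down. In sketch form, with hypothetical foo_* names:

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
        .free         = foo_free_object,
        .export       = foo_prime_export,
        .get_sg_table = drm_gem_cma_prime_get_sg_table,
        .vm_ops       = &foo_vm_ops,
};

/* Wherever the driver creates objects, it must now install the table. */
struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
                                             size_t size)
{
        struct foo_bo *bo = foo_bo_alloc(dev, size);    /* hypothetical */

        if (!bo)
                return NULL;
        bo->base.funcs = &foo_gem_object_funcs;
        return &bo->base;
}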
index 66d4fb16db8f01587eaf5a24253af626ca8d8bd1..7003e7f14a48149d00320f9d50899387a476ca8b 100644 (file)
@@ -801,7 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-vm_fault_t vc4_fault(struct vm_fault *vmf);
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
index eaf276978ee7fb79a145868071ddd91f86fbdd12..19aab4e7e2095c8fe8c0172c0dbbd4167fff0454 100644 (file)
@@ -1246,10 +1246,12 @@ reset_fifo_and_return:
        return ret;
 }
 
+static const struct component_ops vc4_dsi_ops;
 static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
 {
        struct vc4_dsi *dsi = host_to_dsi(host);
+       int ret;
 
        dsi->lanes = device->lanes;
        dsi->channel = device->channel;
@@ -1284,6 +1286,12 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
                return 0;
        }
 
+       ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+       if (ret) {
+               mipi_dsi_host_unregister(&dsi->dsi_host);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -1662,7 +1670,6 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct vc4_dsi *dsi;
-       int ret;
 
        dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
        if (!dsi)
@@ -1670,26 +1677,10 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
        dev_set_drvdata(dev, dsi);
 
        dsi->pdev = pdev;
-
-       /* Note, the initialization sequence for DSI and panels is
-        * tricky.  The component bind above won't get past its
-        * -EPROBE_DEFER until the panel/bridge probes.  The
-        * panel/bridge will return -EPROBE_DEFER until it has a
-        * mipi_dsi_host to register its device to.  So, we register
-        * the host during pdev probe time, so vc4 as a whole can then
-        * -EPROBE_DEFER its component bind process until the panel
-        * successfully attaches.
-        */
        dsi->dsi_host.ops = &vc4_dsi_host_ops;
        dsi->dsi_host.dev = dev;
        mipi_dsi_host_register(&dsi->dsi_host);
 
-       ret = component_add(&pdev->dev, &vc4_dsi_ops);
-       if (ret) {
-               mipi_dsi_host_unregister(&dsi->dsi_host);
-               return ret;
-       }
-
        return 0;
 }
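
The reasoning in the deleted probe comment still holds, only the mechanism changed: the DSI host is still registered at probe time so the panel or bridge has something to attach to, but component_add() now happens in vc4_dsi_host_attach(), once the panel has actually attached, instead of at probe time where the component bind would sit in -EPROBE_DEFER until then.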
 
index 849dcafbfff17d127531595a43acef379b7dcc9b..e0e0b72ea65cad517337624f3b36915b6473450b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_cma_helper.h>
@@ -401,15 +402,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
 }
 
 static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
-                                 struct drm_crtc_state *old_state)
+                                 struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        drm_crtc_vblank_on(crtc);
        vc4_hvs_atomic_enable(crtc, old_state);
 }
 
 static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct drm_device *dev = crtc->dev;
 
        /* Disable vblank irq handling before crtc is disabled. */
index cb884c89006541d57d9408db4e5af1ece34e61e8..fa54a6d1403d0c9a5f9c50614fbd200682525212 100644 (file)
@@ -50,6 +50,8 @@
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
+static const struct drm_gem_object_funcs vgem_gem_object_funcs;
+
 static struct vgem_device {
        struct drm_device drm;
        struct platform_device *platform;
@@ -167,6 +169,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
        if (!obj)
                return ERR_PTR(-ENOMEM);
 
+       obj->base.funcs = &vgem_gem_object_funcs;
+
        ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
        if (ret) {
                kfree(obj);
@@ -401,12 +405,20 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
        return 0;
 }
 
+static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
+       .free = vgem_gem_free_object,
+       .pin = vgem_prime_pin,
+       .unpin = vgem_prime_unpin,
+       .get_sg_table = vgem_prime_get_sg_table,
+       .vmap = vgem_prime_vmap,
+       .vunmap = vgem_prime_vunmap,
+       .vm_ops = &vgem_gem_vm_ops,
+};
+
 static struct drm_driver vgem_driver = {
        .driver_features                = DRIVER_GEM | DRIVER_RENDER,
        .open                           = vgem_open,
        .postclose                      = vgem_postclose,
-       .gem_free_object_unlocked       = vgem_gem_free_object,
-       .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .num_ioctls                     = ARRAY_SIZE(vgem_ioctls),
        .fops                           = &vgem_driver_fops,
@@ -415,13 +427,8 @@ static struct drm_driver vgem_driver = {
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_pin = vgem_prime_pin,
-       .gem_prime_unpin = vgem_prime_unpin,
        .gem_prime_import = vgem_prime_import,
        .gem_prime_import_sg_table = vgem_prime_import_sg_table,
-       .gem_prime_get_sg_table = vgem_prime_get_sg_table,
-       .gem_prime_vmap = vgem_prime_vmap,
-       .gem_prime_vunmap = vgem_prime_vunmap,
        .gem_prime_mmap = vgem_prime_mmap,
 
        .name   = DRIVER_NAME,
index 45cc9e90026012bd486cc0f1063921d195e069c4..dae1bacd86c1eacb72886b62da0ab2921c8963f0 100644 (file)
@@ -129,9 +129,9 @@ int via_mem_alloc(struct drm_device *dev, void *data,
        mutex_lock(&dev->struct_mutex);
        if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
                      dev_priv->agp_initialized)) {
+               mutex_unlock(&dev->struct_mutex);
                DRM_ERROR
                    ("Attempt to allocate from uninitialized memory manager.\n");
-               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
index 92aa2b3d349d99235035476cacb921f67e1d2a24..b99fa4a73b68e9df28d4f97dea09d19fc71b6a31 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
        virtgpu_display.o virtgpu_vq.o \
        virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
        virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
index d5b0c543bd6d72b0fc55a3b873e227b1d8466f24..f336a8fa66667be267ca99b45127ae587315bb93 100644 (file)
@@ -42,15 +42,21 @@ static void virtio_add_int(struct seq_file *m, const char *name,
 
 static int virtio_gpu_features(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
        virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
        virtio_add_bool(m, "edid", vgdev->has_edid);
        virtio_add_bool(m, "indirect", vgdev->has_indirect);
        virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
+       virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
        virtio_add_int(m, "cap sets", vgdev->num_capsets);
        virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+       if (vgdev->host_visible_region.len) {
+               seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
+                          (unsigned long)vgdev->host_visible_region.addr,
+                          (unsigned long)vgdev->host_visible_region.len);
+       }
        return 0;
 }
 
@@ -66,9 +72,27 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int
+virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+       struct drm_printer p;
+
+       if (!vgdev->has_host_visible) {
+               seq_puts(m, "Host allocations not visible to guest\n");
+               return 0;
+       }
+
+       p = drm_seq_file_printer(m);
+       drm_mm_print(&vgdev->host_visible_mm, &p);
+       return 0;
+}
+
 static struct drm_info_list virtio_gpu_debugfs_list[] = {
        { "virtio-gpu-features", virtio_gpu_features },
        { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+       { "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm },
 };
 
 #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
index f84b7e61311bc3b98041c4f9633c150075775b9d..48b3194ee0514802acdaff69b6d36684a9776781 100644 (file)
@@ -95,12 +95,12 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
-                                         struct drm_crtc_state *old_state)
+                                         struct drm_atomic_state *state)
 {
 }
 
 static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
-                                          struct drm_crtc_state *old_state)
+                                          struct drm_atomic_state *state)
 {
        struct drm_device *dev = crtc->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
index b039f493bda99fd02c2c27e2fa5a5a7f07581822..86330f1ade72ed3de037ffc0bc38556dd419fc91 100644 (file)
@@ -166,6 +166,7 @@ static unsigned int features[] = {
 #endif
        VIRTIO_GPU_F_EDID,
        VIRTIO_GPU_F_RESOURCE_UUID,
+       VIRTIO_GPU_F_RESOURCE_BLOB,
 };
 static struct virtio_driver virtio_gpu_driver = {
        .feature_table = features,
@@ -203,7 +204,6 @@ static struct drm_driver driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_mmap = drm_gem_prime_mmap,
-       .gem_prime_export = virtgpu_gem_prime_export,
        .gem_prime_import = virtgpu_gem_prime_import,
        .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
 
index 55c34b4fc3e99b30aae8edf788e17e3f16ed4e24..3c0e17212c336572e055a2eb6b6f49e195e2ae75 100644 (file)
@@ -35,6 +35,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0
 
-#define UUID_INITIALIZING 0
-#define UUID_INITIALIZED 1
-#define UUID_INITIALIZATION_FAILED 2
+#define STATE_INITIALIZING 0
+#define STATE_OK 1
+#define STATE_ERR 2
 
 struct virtio_gpu_object_params {
-       uint32_t format;
-       uint32_t width;
-       uint32_t height;
        unsigned long size;
        bool dumb;
        /* 3d */
        bool virgl;
+       bool blob;
+
+       /* classic resources only */
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
        uint32_t target;
        uint32_t bind;
        uint32_t depth;
@@ -68,6 +72,12 @@ struct virtio_gpu_object_params {
        uint32_t last_level;
        uint32_t nr_samples;
        uint32_t flags;
+
+       /* blob resources only */
+       uint32_t ctx_id;
+       uint32_t blob_mem;
+       uint32_t blob_flags;
+       uint64_t blob_id;
 };
 
 struct virtio_gpu_object {
@@ -75,6 +85,8 @@ struct virtio_gpu_object {
        uint32_t hw_res_handle;
        bool dumb;
        bool created;
+       bool host3d_blob, guest_blob;
+       uint32_t blob_mem, blob_flags;
 
        int uuid_state;
        uuid_t uuid;
@@ -88,9 +100,19 @@ struct virtio_gpu_object_shmem {
        uint32_t mapped;
 };
 
+struct virtio_gpu_object_vram {
+       struct virtio_gpu_object base;
+       uint32_t map_state;
+       uint32_t map_info;
+       struct drm_mm_node vram_node;
+};
+
 #define to_virtio_gpu_shmem(virtio_gpu_object) \
        container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
 
+#define to_virtio_gpu_vram(virtio_gpu_object) \
+       container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)
+
 struct virtio_gpu_object_array {
        struct ww_acquire_ctx ticket;
        struct list_head next;
@@ -208,6 +230,10 @@ struct virtio_gpu_device {
        bool has_edid;
        bool has_indirect;
        bool has_resource_assign_uuid;
+       bool has_resource_blob;
+       bool has_host_visible;
+       struct virtio_shm_region host_visible_region;
+       struct drm_mm host_visible_mm;
 
        struct work_struct config_changed_work;
 
@@ -219,8 +245,10 @@ struct virtio_gpu_device {
        uint32_t num_capsets;
        struct list_head cap_cache;
 
-       /* protects resource state when exporting */
+       /* protects uuid state when exporting */
        spinlock_t resource_export_lock;
+       /* protects map state and host_visible_mm */
+       spinlock_t host_visible_lock;
 };
 
 struct virtio_gpu_fpriv {
@@ -229,8 +257,8 @@ struct virtio_gpu_fpriv {
        struct mutex context_lock;
 };
 
 /* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 10
+#define DRM_VIRTIO_NUM_IOCTLS 11
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
@@ -323,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride,
+                                         uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride,
+                                       uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence);
@@ -351,6 +383,26 @@ int
 virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs);
 
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+                      struct virtio_gpu_object_array *objs, uint64_t offset);
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+                         struct virtio_gpu_object *bo);
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo,
+                                   struct virtio_gpu_object_params *params,
+                                   struct virtio_gpu_mem_entry *ents,
+                                   uint32_t nents);
+void
+virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+                               uint32_t scanout_id,
+                               struct virtio_gpu_object *bo,
+                               struct drm_framebuffer *fb,
+                               uint32_t width, uint32_t height,
+                               uint32_t x, uint32_t y);
+
 /* virtgpu_display.c */
 int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -381,7 +433,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 
 bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
 
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+                              uint32_t *resid);
 /* virtgpu_prime.c */
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo);
 struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
                                         int flags);
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
@@ -395,4 +451,9 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 /* virtgpu_debugfs.c */
 void virtio_gpu_debugfs_init(struct drm_minor *minor);
 
+/* virtgpu_vram.c */
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+                          struct virtio_gpu_object_params *params,
+                          struct virtio_gpu_object **bo_ptr);
 #endif
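
virtio_gpu_object_params now splits into fields for classic resources (format, width, height plus the 3D texture parameters) and fields used only by blob resources. A hypothetical helper, as a sketch of how a host3d blob would be described:

/* Sketch: fill params for a mappable HOST3D blob. ctx_id is assumed to
 * come from the caller's virtio_gpu_fpriv; blob_id names a host-side
 * allocation negotiated out of band. */
static void fill_host3d_blob_params(struct virtio_gpu_object_params *p,
                                    uint32_t ctx_id, uint64_t blob_id,
                                    uint64_t size)
{
        memset(p, 0, sizeof(*p));
        p->size       = size;
        p->blob       = true;
        p->blob_mem   = VIRTGPU_BLOB_MEM_HOST3D;
        p->blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
        p->blob_id    = blob_id;
        p->ctx_id     = ctx_id;
}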
index c8da7adc6b307b5c690335d5edec7bc27eb9a543..5417f365d1a39ece3ba02c19af67ab30361aacfa 100644 (file)
 
 #include "virtgpu_drv.h"
 
+#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
+                                   VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
+                                   VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -208,11 +212,20 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 
        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
-               value = vgdev->has_virgl_3d == true ? 1 : 0;
+               value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
+       case VIRTGPU_PARAM_RESOURCE_BLOB:
+               value = vgdev->has_resource_blob ? 1 : 0;
+               break;
+       case VIRTGPU_PARAM_HOST_VISIBLE:
+               value = vgdev->has_host_visible ? 1 : 0;
+               break;
+       case VIRTGPU_PARAM_CROSS_DEVICE:
+               value = vgdev->has_resource_assign_uuid ? 1 : 0;
+               break;
        default:
                return -EINVAL;
        }
@@ -301,6 +314,9 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
 
        ri->size = qobj->base.base.size;
        ri->res_handle = qobj->hw_res_handle;
+       if (qobj->host3d_blob || qobj->guest_blob)
+               ri->blob_mem = qobj->blob_mem;
+
        drm_gem_object_put(gobj);
        return 0;
 }
@@ -312,6 +328,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
+       struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
@@ -325,6 +342,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
        if (objs == NULL)
                return -ENOENT;
 
+       bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       if (bo->guest_blob && !bo->host3d_blob) {
+               ret = -EINVAL;
+               goto err_put_free;
+       }
+
+       if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+               ret = -EINVAL;
+               goto err_put_free;
+       }
+
        ret = virtio_gpu_array_lock_resv(objs);
        if (ret != 0)
                goto err_put_free;
@@ -334,9 +362,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                ret = -ENOMEM;
                goto err_unlock;
        }
+
        virtio_gpu_cmd_transfer_from_host_3d
-               (vgdev, vfpriv->ctx_id, offset, args->level,
-                &args->box, objs, fence);
+               (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+                args->layer_stride, &args->box, objs, fence);
        dma_fence_put(&fence->f);
        virtio_gpu_notify(vgdev);
        return 0;
@@ -354,6 +383,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
+       struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
@@ -363,6 +393,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        if (objs == NULL)
                return -ENOENT;
 
+       bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       if (bo->guest_blob && !bo->host3d_blob) {
+               ret = -EINVAL;
+               goto err_put_free;
+       }
+
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, offset,
@@ -370,6 +406,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                         objs, NULL);
        } else {
                virtio_gpu_create_context(dev, file);
+
+               if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+                       ret = -EINVAL;
+                       goto err_put_free;
+               }
+
                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_free;
@@ -381,8 +423,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev,
-                        vfpriv ? vfpriv->ctx_id : 0, offset,
-                        args->level, &args->box, objs, fence);
+                        vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+                        args->stride, args->layer_stride, &args->box, objs,
+                        fence);
                dma_fence_put(&fence->f);
        }
        virtio_gpu_notify(vgdev);
@@ -491,6 +534,134 @@ copy_exit:
        return 0;
 }
 
+static int verify_blob(struct virtio_gpu_device *vgdev,
+                      struct virtio_gpu_fpriv *vfpriv,
+                      struct virtio_gpu_object_params *params,
+                      struct drm_virtgpu_resource_create_blob *rc_blob,
+                      bool *guest_blob, bool *host3d_blob)
+{
+       if (!vgdev->has_resource_blob)
+               return -EINVAL;
+
+       if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
+           !rc_blob->blob_flags)
+               return -EINVAL;
+
+       if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+               if (!vgdev->has_resource_assign_uuid)
+                       return -EINVAL;
+       }
+
+       switch (rc_blob->blob_mem) {
+       case VIRTGPU_BLOB_MEM_GUEST:
+               *guest_blob = true;
+               break;
+       case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
+               *guest_blob = true;
+               fallthrough;
+       case VIRTGPU_BLOB_MEM_HOST3D:
+               *host3d_blob = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (*host3d_blob) {
+               if (!vgdev->has_virgl_3d)
+                       return -EINVAL;
+
+               /* Must be dword aligned. */
+               if (rc_blob->cmd_size % 4 != 0)
+                       return -EINVAL;
+
+               params->ctx_id = vfpriv->ctx_id;
+               params->blob_id = rc_blob->blob_id;
+       } else {
+               if (rc_blob->blob_id != 0)
+                       return -EINVAL;
+
+               if (rc_blob->cmd_size != 0)
+                       return -EINVAL;
+       }
+
+       params->blob_mem = rc_blob->blob_mem;
+       params->size = rc_blob->size;
+       params->blob = true;
+       params->blob_flags = rc_blob->blob_flags;
+       return 0;
+}
+
+static int virtio_gpu_resource_create_blob(struct drm_device *dev,
+                                          void *data, struct drm_file *file)
+{
+       int ret = 0;
+       uint32_t handle = 0;
+       bool guest_blob = false;
+       bool host3d_blob = false;
+       struct drm_gem_object *obj;
+       struct virtio_gpu_object *bo;
+       struct virtio_gpu_object_params params = { 0 };
+       struct virtio_gpu_device *vgdev = dev->dev_private;
+       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+       struct drm_virtgpu_resource_create_blob *rc_blob = data;
+
+       if (verify_blob(vgdev, vfpriv, &params, rc_blob,
+                       &guest_blob, &host3d_blob))
+               return -EINVAL;
+
+       if (vgdev->has_virgl_3d)
+               virtio_gpu_create_context(dev, file);
+
+       if (rc_blob->cmd_size) {
+               void *buf;
+
+               buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
+                                 rc_blob->cmd_size);
+
+               if (IS_ERR(buf))
+                       return PTR_ERR(buf);
+
+               virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+                                     vfpriv->ctx_id, NULL, NULL);
+       }
+
+       if (guest_blob)
+               ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
+       else if (!guest_blob && host3d_blob)
+               ret = virtio_gpu_vram_create(vgdev, &params, &bo);
+       else
+               return -EINVAL;
+
+       if (ret < 0)
+               return ret;
+
+       bo->guest_blob = guest_blob;
+       bo->host3d_blob = host3d_blob;
+       bo->blob_mem = rc_blob->blob_mem;
+       bo->blob_flags = rc_blob->blob_flags;
+
+       obj = &bo->base.base;
+       if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+               ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+               if (ret) {
+                       drm_gem_object_release(obj);
+                       return ret;
+               }
+       }
+
+       ret = drm_gem_handle_create(file, obj, &handle);
+       if (ret) {
+               drm_gem_object_release(obj);
+               return ret;
+       }
+       drm_gem_object_put(obj);
+
+       rc_blob->res_handle = bo->hw_res_handle;
+       rc_blob->bo_handle = handle;
+
+       return 0;
+}
+
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_RENDER_ALLOW),
@@ -523,4 +694,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 
        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_RENDER_ALLOW),
+
+       DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+                         virtio_gpu_resource_create_blob,
+                         DRM_RENDER_ALLOW),
 };
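
From userspace the new ioctl looks like the sketch below: a guest-memory blob with no command stream, so blob_id and cmd_size stay zero as verify_blob() requires. Error handling is trimmed and fd is assumed to be an open virtio-gpu render node.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

static int create_guest_blob(int fd, uint64_t size, uint32_t *bo_handle)
{
        struct drm_virtgpu_resource_create_blob rc = {
                .blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
                .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
                .size       = size,
        };
        int ret = ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &rc);

        if (ret == 0)
                *bo_handle = rc.bo_handle;
        return ret;
}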
index eed57a9313098d652099e98132429a8b06f6aa7f..b4ec479c32cda13cfd5e125c70878104afb5b877 100644 (file)
@@ -121,6 +121,7 @@ int virtio_gpu_init(struct drm_device *dev)
 
        spin_lock_init(&vgdev->display_info_lock);
        spin_lock_init(&vgdev->resource_export_lock);
+       spin_lock_init(&vgdev->host_visible_lock);
        ida_init(&vgdev->ctx_id_ida);
        ida_init(&vgdev->resource_ida);
        init_waitqueue_head(&vgdev->resp_wq);
@@ -152,10 +153,33 @@ int virtio_gpu_init(struct drm_device *dev)
        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
                vgdev->has_resource_assign_uuid = true;
        }
+       if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+               vgdev->has_resource_blob = true;
+       }
+       if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
+                                 VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
+               if (!devm_request_mem_region(&vgdev->vdev->dev,
+                                            vgdev->host_visible_region.addr,
+                                            vgdev->host_visible_region.len,
+                                            dev_name(&vgdev->vdev->dev))) {
+                       DRM_ERROR("Could not reserve host visible region\n");
+                       goto err_vqs;
+               }
+
+               DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
+                        (unsigned long)vgdev->host_visible_region.addr,
+                        (unsigned long)vgdev->host_visible_region.len);
+               vgdev->has_host_visible = true;
+               drm_mm_init(&vgdev->host_visible_mm,
+                           (unsigned long)vgdev->host_visible_region.addr,
+                           (unsigned long)vgdev->host_visible_region.len);
+       }
 
-       DRM_INFO("features: %cvirgl %cedid\n",
-                vgdev->has_virgl_3d ? '+' : '-',
-                vgdev->has_edid     ? '+' : '-');
+       DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+                vgdev->has_virgl_3d    ? '+' : '-',
+                vgdev->has_edid        ? '+' : '-',
+                vgdev->has_resource_blob ? '+' : '-',
+                vgdev->has_host_visible ? '+' : '-');
 
        ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
        if (ret) {
@@ -242,6 +266,10 @@ void virtio_gpu_release(struct drm_device *dev)
        virtio_gpu_modeset_fini(vgdev);
        virtio_gpu_free_vbufs(vgdev);
        virtio_gpu_cleanup_cap_cache(vgdev);
+
+       if (vgdev->has_host_visible)
+               drm_mm_takedown(&vgdev->host_visible_mm);
+
        kfree(vgdev->capsets);
        kfree(vgdev);
 }
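
Setup and teardown are kept symmetric: the host-visible window is discovered via virtio_get_shm_region(), reserved with devm_request_mem_region() so nothing else claims it, and handed to a drm_mm allocator that virtgpu_vram.c later carves mappings out of; virtio_gpu_release() only tears the drm_mm down when the feature was actually negotiated.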
index 00d6b95e259d66ff1e1c258397f8b1f05454b41f..2d3aa7baffe4013a46966460ddd19a276d887d7f 100644 (file)
@@ -31,8 +31,7 @@
 static int virtio_gpu_virglrenderer_workaround = 1;
 module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
 
-static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-                                      uint32_t *resid)
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
 {
        if (virtio_gpu_virglrenderer_workaround) {
                /*
@@ -84,6 +83,18 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
                }
 
                drm_gem_shmem_free_object(&bo->base.base);
+       } else if (virtio_gpu_is_vram(bo)) {
+               struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+               spin_lock(&vgdev->host_visible_lock);
+               if (drm_mm_node_allocated(&vram->vram_node))
+                       drm_mm_remove_node(&vram->vram_node);
+
+               spin_unlock(&vgdev->host_visible_lock);
+
+               drm_gem_free_mmap_offset(&vram->base.base.base);
+               drm_gem_object_release(&vram->base.base.base);
+               kfree(vram);
        }
 }
 
@@ -107,6 +118,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
        .close = virtio_gpu_gem_object_close,
 
        .print_info = drm_gem_shmem_print_info,
+       .export = virtgpu_gem_prime_export,
        .pin = drm_gem_shmem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
@@ -234,21 +246,24 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                        goto err_put_objs;
        }
 
-       if (params->virgl) {
-               virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
-                                                 objs, fence);
-       } else {
-               virtio_gpu_cmd_create_resource(vgdev, bo, params,
-                                              objs, fence);
-       }
-
        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
        if (ret != 0) {
                virtio_gpu_free_object(&shmem_obj->base);
                return ret;
        }
 
-       virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       if (params->blob) {
+               virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
+                                                   ents, nents);
+       } else if (params->virgl) {
+               virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+                                                 objs, fence);
+               virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       } else {
+               virtio_gpu_cmd_create_resource(vgdev, bo, params,
+                                              objs, fence);
+               virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       }
 
        *bo_ptr = bo;
        return 0;
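
Note the reordering in virtio_gpu_object_create(): the shmem backing is now initialized before any create command is queued, because a blob resource hands its memory entries to RESOURCE_CREATE_BLOB directly, while the classic 3D and 2D paths still create the resource first and attach the backing store afterwards.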
index 6a311cd934403914e87ae3b0b2acafcbe40c4032..42ac08ed1442ee4ad078a821143bf1bdb9b997ff 100644 (file)
@@ -174,12 +174,23 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                          plane->state->src_h >> 16,
                          plane->state->src_x >> 16,
                          plane->state->src_y >> 16);
-               virtio_gpu_cmd_set_scanout(vgdev, output->index,
-                                          bo->hw_res_handle,
-                                          plane->state->src_w >> 16,
-                                          plane->state->src_h >> 16,
-                                          plane->state->src_x >> 16,
-                                          plane->state->src_y >> 16);
+
+               if (bo->host3d_blob || bo->guest_blob) {
+                       virtio_gpu_cmd_set_scanout_blob
+                                               (vgdev, output->index, bo,
+                                                plane->state->fb,
+                                                plane->state->src_w >> 16,
+                                                plane->state->src_h >> 16,
+                                                plane->state->src_x >> 16,
+                                                plane->state->src_y >> 16);
+               } else {
+                       virtio_gpu_cmd_set_scanout(vgdev, output->index,
+                                                  bo->hw_res_handle,
+                                                  plane->state->src_w >> 16,
+                                                  plane->state->src_h >> 16,
+                                                  plane->state->src_x >> 16,
+                                                  plane->state->src_y >> 16);
+               }
        }
 
        virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
index acd14ef73d5631b1b4c3feb082881bcdec89e3a2..1ef1e2f2263332bd5eb0f61f01e46261bf8a3a41 100644 (file)
@@ -34,8 +34,8 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 
-       wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
-       if (bo->uuid_state != UUID_INITIALIZED)
+       wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
+       if (bo->uuid_state != STATE_OK)
                return -ENODEV;
 
        uuid_copy(uuid, &bo->uuid);
@@ -59,6 +59,24 @@ const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
        .get_uuid = virtgpu_virtio_get_uuid,
 };
 
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo)
+{
+       int ret;
+       struct virtio_gpu_object_array *objs;
+
+       objs = virtio_gpu_array_alloc(1);
+       if (!objs)
+               return -ENOMEM;
+
+       virtio_gpu_array_add_obj(objs, &bo->base.base);
+       ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
 struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
                                         int flags)
 {
@@ -66,22 +84,20 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
        struct drm_device *dev = obj->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-       struct virtio_gpu_object_array *objs;
        int ret = 0;
+       bool blob = bo->host3d_blob || bo->guest_blob;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
-       if (vgdev->has_resource_assign_uuid) {
-               objs = virtio_gpu_array_alloc(1);
-               if (!objs)
-                       return ERR_PTR(-ENOMEM);
-               virtio_gpu_array_add_obj(objs, &bo->base.base);
-
-               ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
-               if (ret)
-                       return ERR_PTR(ret);
-               virtio_gpu_notify(vgdev);
-       } else {
-               bo->uuid_state = UUID_INITIALIZATION_FAILED;
+       if (!blob) {
+               if (vgdev->has_resource_assign_uuid) {
+                       ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+                       if (ret)
+                               return ERR_PTR(ret);
+
+                       virtio_gpu_notify(vgdev);
+               } else {
+                       bo->uuid_state = STATE_ERR;
+               }
        }
 
        exp_info.ops = &virtgpu_dmabuf_ops.ops;
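
Blob resources that were created with VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE already received their UUID in the create-blob ioctl, so the export path only negotiates a UUID for non-blob objects and otherwise leaves uuid_state untouched.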
index 07945ca238e2d93741a7df8db1c21c56f56e7f74..857f730747b61c099484909b0fcf88bbf82b07ad 100644 (file)
@@ -1016,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride,
+                                       uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
@@ -1024,11 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-       struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-       if (use_dma_api)
+       if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+               struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);
+       }
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1041,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1048,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride,
+                                         uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
@@ -1067,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1125,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);
 
        spin_lock(&vgdev->resource_export_lock);
-       WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+       WARN_ON(obj->uuid_state != STATE_INITIALIZING);
 
        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
-           obj->uuid_state == UUID_INITIALIZING) {
-               memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
-               obj->uuid_state = UUID_INITIALIZED;
+           obj->uuid_state == STATE_INITIALIZING) {
+               import_uuid(&obj->uuid, resp->uuid);
+               obj->uuid_state = STATE_OK;
        } else {
-               obj->uuid_state = UUID_INITIALIZATION_FAILED;
+               obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);
 
@@ -1151,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
-               bo->uuid_state = UUID_INITIALIZATION_FAILED;
+               bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
@@ -1169,3 +1178,134 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
 }
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+                                          struct virtio_gpu_vbuffer *vbuf)
+{
+       struct virtio_gpu_object *bo =
+               gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+       struct virtio_gpu_resp_map_info *resp =
+               (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+       spin_lock(&vgdev->host_visible_lock);
+
+       if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+               vram->map_info = resp->map_info;
+               vram->map_state = STATE_OK;
+       } else {
+               vram->map_state = STATE_ERR;
+       }
+
+       spin_unlock(&vgdev->host_visible_lock);
+       wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+                      struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+       struct virtio_gpu_resource_map_blob *cmd_p;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       struct virtio_gpu_vbuffer *vbuf;
+       struct virtio_gpu_resp_map_info *resp_buf;
+
+       resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+       if (!resp_buf) {
+               virtio_gpu_array_put_free(objs);
+               return -ENOMEM;
+       }
+
+       cmd_p = virtio_gpu_alloc_cmd_resp
+               (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+                sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->offset = cpu_to_le64(offset);
+       vbuf->objs = objs;
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+                         struct virtio_gpu_object *bo)
+{
+       struct virtio_gpu_resource_unmap_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo,
+                                   struct virtio_gpu_object_params *params,
+                                   struct virtio_gpu_mem_entry *ents,
+                                   uint32_t nents)
+{
+       struct virtio_gpu_resource_create_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+       cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+       cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+       cmd_p->blob_id = cpu_to_le64(params->blob_id);
+       cmd_p->size = cpu_to_le64(params->size);
+       cmd_p->nr_entries = cpu_to_le32(nents);
+
+       vbuf->data_buf = ents;
+       vbuf->data_size = sizeof(*ents) * nents;
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       bo->created = true;
+}
+
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+                                    uint32_t scanout_id,
+                                    struct virtio_gpu_object *bo,
+                                    struct drm_framebuffer *fb,
+                                    uint32_t width, uint32_t height,
+                                    uint32_t x, uint32_t y)
+{
+       uint32_t i;
+       struct virtio_gpu_set_scanout_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+       uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+       cmd_p->format = cpu_to_le32(format);
+       cmd_p->width  = cpu_to_le32(fb->width);
+       cmd_p->height = cpu_to_le32(fb->height);
+
+       for (i = 0; i < 4; i++) {
+               cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+               cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+       }
+
+       cmd_p->r.width = cpu_to_le32(width);
+       cmd_p->r.height = cpu_to_le32(height);
+       cmd_p->r.x = cpu_to_le32(x);
+       cmd_p->r.y = cpu_to_le32(y);
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
new file mode 100644 (file)
index 0000000..23c21bc
--- /dev/null
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_vram_free(struct drm_gem_object *obj)
+{
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+       struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       bool unmap;
+
+       if (bo->created) {
+               spin_lock(&vgdev->host_visible_lock);
+               unmap = drm_mm_node_allocated(&vram->vram_node);
+               spin_unlock(&vgdev->host_visible_lock);
+
+               if (unmap)
+                       virtio_gpu_cmd_unmap(vgdev, bo);
+
+               virtio_gpu_cmd_unref_resource(vgdev, bo);
+               virtio_gpu_notify(vgdev);
+               return;
+       }
+}
+
+static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
+                               struct vm_area_struct *vma)
+{
+       int ret;
+       struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+       if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+               return -EINVAL;
+
+       wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
+       if (vram->map_state != STATE_OK)
+               return -EINVAL;
+
+       vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+       vma->vm_ops = &virtio_gpu_vram_vm_ops;
+
+       if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       /* Partial mappings of GEM buffers don't happen much in practice. */
+       if (vm_size != vram->vram_node.size)
+               return -EINVAL;
+
+       ret = io_remap_pfn_range(vma, vma->vm_start,
+                                vram->vram_node.start >> PAGE_SHIFT,
+                                vm_size, vma->vm_page_prot);
+       return ret;
+}
+
+static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
+       .open = virtio_gpu_gem_object_open,
+       .close = virtio_gpu_gem_object_close,
+       .free = virtio_gpu_vram_free,
+       .mmap = virtio_gpu_vram_mmap,
+};
+
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
+{
+       return bo->base.base.funcs == &virtio_gpu_vram_funcs;
+}
+
+static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
+{
+       int ret;
+       uint64_t offset;
+       struct virtio_gpu_object_array *objs;
+       struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+       if (!vgdev->has_host_visible)
+               return -EINVAL;
+
+       spin_lock(&vgdev->host_visible_lock);
+       ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
+                                bo->base.base.size);
+       spin_unlock(&vgdev->host_visible_lock);
+
+       if (ret)
+               return ret;
+
+       objs = virtio_gpu_array_alloc(1);
+       if (!objs) {
+               ret = -ENOMEM;
+               goto err_remove_node;
+       }
+
+       virtio_gpu_array_add_obj(objs, &bo->base.base);
+       /* TODO: Add an error checking helper function in drm_mm.h */
+       offset = vram->vram_node.start - vgdev->host_visible_region.addr;
+
+       ret = virtio_gpu_cmd_map(vgdev, objs, offset);
+       if (ret) {
+               virtio_gpu_array_put_free(objs);
+               goto err_remove_node;
+       }
+
+       return 0;
+
+err_remove_node:
+       spin_lock(&vgdev->host_visible_lock);
+       drm_mm_remove_node(&vram->vram_node);
+       spin_unlock(&vgdev->host_visible_lock);
+       return ret;
+}
+
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+                          struct virtio_gpu_object_params *params,
+                          struct virtio_gpu_object **bo_ptr)
+{
+       struct drm_gem_object *obj;
+       struct virtio_gpu_object_vram *vram;
+       int ret;
+
+       vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+       if (!vram)
+               return -ENOMEM;
+
+       obj = &vram->base.base.base;
+       obj->funcs = &virtio_gpu_vram_funcs;
+       drm_gem_private_object_init(vgdev->ddev, obj, params->size);
+
+       /* Create fake offset */
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret) {
+               kfree(vram);
+               return ret;
+       }
+
+       ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
+       if (ret) {
+               kfree(vram);
+               return ret;
+       }
+
+       virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
+                                           0);
+       if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
+               ret = virtio_gpu_vram_map(&vram->base);
+               if (ret) {
+                       virtio_gpu_vram_free(obj);
+                       return ret;
+               }
+       }
+
+       *bo_ptr = &vram->base;
+       return 0;
+}
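
The new virtio-gpu file above tracks each mappable blob with a drm_mm node carved out of the device's host-visible window, under a spinlock, and hands the host an offset relative to the window start. A minimal sketch of that allocation idiom follows; struct my_dev, host_mm, host_lock and host_base are hypothetical names that only illustrate the pattern, not the driver's real API.

#include <drm/drm_mm.h>
#include <linux/spinlock.h>

struct my_dev {
        struct drm_mm host_mm;          /* manages the host-visible window */
        spinlock_t host_lock;           /* serializes drm_mm access */
        u64 host_base;                  /* bus address of the window */
};

static void my_dev_window_init(struct my_dev *d, u64 base, u64 len)
{
        spin_lock_init(&d->host_lock);
        d->host_base = base;
        /* Nodes allocated from this mm carry absolute addresses. */
        drm_mm_init(&d->host_mm, base, len);
}

static int my_dev_alloc_window(struct my_dev *d, struct drm_mm_node *node,
                               u64 size, u64 *offset)
{
        int ret;

        spin_lock(&d->host_lock);
        ret = drm_mm_insert_node(&d->host_mm, node, size);
        spin_unlock(&d->host_lock);
        if (ret)
                return ret;

        /* Offsets handed to the host are relative to the window start. */
        *offset = node->start - d->host_base;
        return 0;
}

Freeing reverses the sequence: drm_mm_remove_node() under the same lock, exactly as the err_remove_node path above does.
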
index 333d3cead0e38dd78192cfb676abb7f4dd471fec..72f779cbfeddb2baa1964f6e94b8cbb2502bc9d4 100644 (file)
@@ -4,7 +4,6 @@ vkms-y := \
        vkms_plane.o \
        vkms_output.o \
        vkms_crtc.o \
-       vkms_gem.o \
        vkms_composer.o \
        vkms_writeback.o
 
index 33c031f27c2c11848a54e3039f82339ca2fc393a..66c6842d70db5de37f54642fc2bba85334507903 100644 (file)
@@ -5,6 +5,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "vkms_drv.h"
@@ -129,15 +130,15 @@ static void compose_cursor(struct vkms_composer *cursor_composer,
                           void *vaddr_out)
 {
        struct drm_gem_object *cursor_obj;
-       struct vkms_gem_object *cursor_vkms_obj;
+       struct drm_gem_shmem_object *cursor_shmem_obj;
 
        cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
-       cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
+       cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj);
 
-       if (WARN_ON(!cursor_vkms_obj->vaddr))
+       if (WARN_ON(!cursor_shmem_obj->vaddr))
                return;
 
-       blend(vaddr_out, cursor_vkms_obj->vaddr,
+       blend(vaddr_out, cursor_shmem_obj->vaddr,
              primary_composer, cursor_composer);
 }
 
@@ -147,20 +148,20 @@ static int compose_planes(void **vaddr_out,
 {
        struct drm_framebuffer *fb = &primary_composer->fb;
        struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
-       struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+       struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);
 
        if (!*vaddr_out) {
-               *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+               *vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
                if (!*vaddr_out) {
                        DRM_ERROR("Cannot allocate memory for output frame.");
                        return -ENOMEM;
                }
        }
 
-       if (WARN_ON(!vkms_obj->vaddr))
+       if (WARN_ON(!shmem_obj->vaddr))
                return -EINVAL;
 
-       memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+       memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);
 
        if (cursor_composer)
                compose_cursor(cursor_composer, primary_composer, *vaddr_out);
index 09c012d54d58f646c4f4b3c42818de52ad0877cd..e43e4e1b268a331f4f190a3290ce9e34a29d2942 100644 (file)
@@ -214,13 +214,13 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
 {
        drm_crtc_vblank_on(crtc);
 }
 
 static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+                                    struct drm_atomic_state *state)
 {
        drm_crtc_vblank_off(crtc);
 }
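
The two hunks above follow the subsystem-wide change of the CRTC atomic_enable/atomic_disable helper signatures from the old per-CRTC state to the full drm_atomic_state. A hedged sketch of a converted callback, assuming it still wants the old CRTC state, which it can fetch with drm_atomic_get_old_crtc_state(); my_crtc_atomic_enable is hypothetical.

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static void my_crtc_atomic_enable(struct drm_crtc *crtc,
                                  struct drm_atomic_state *state)
{
        /* Old state, when still needed, is looked up from @state. */
        struct drm_crtc_state *old_state =
                drm_atomic_get_old_crtc_state(state, crtc);

        if (old_state && old_state->active)
                return;         /* was already running */

        drm_crtc_vblank_on(crtc);
}
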
index cb0b6230c22cefbfbc1488fb839304ae4f7b260a..25faba5aac087d566b93abab9ed940bcd200f6a1 100644 (file)
@@ -23,6 +23,7 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "vkms_drv.h"
@@ -39,23 +40,7 @@ bool enable_cursor = true;
 module_param_named(enable_cursor, enable_cursor, bool, 0444);
 MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
 
-static const struct file_operations vkms_driver_fops = {
-       .owner          = THIS_MODULE,
-       .open           = drm_open,
-       .mmap           = drm_gem_mmap,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl   = drm_compat_ioctl,
-       .poll           = drm_poll,
-       .read           = drm_read,
-       .llseek         = no_llseek,
-       .release        = drm_release,
-};
-
-static const struct vm_operations_struct vkms_gem_vm_ops = {
-       .fault = vkms_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
+DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
 
 static void vkms_release(struct drm_device *dev)
 {
@@ -97,11 +82,8 @@ static struct drm_driver vkms_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
        .release                = vkms_release,
        .fops                   = &vkms_driver_fops,
-       .dumb_create            = vkms_dumb_create,
-       .gem_vm_ops             = &vkms_gem_vm_ops,
-       .gem_free_object_unlocked = vkms_gem_free_object,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
-       .gem_prime_import_sg_table = vkms_prime_import_sg_table,
+       .gem_create_object = drm_gem_shmem_create_object_cached,
+       DRM_GEM_SHMEM_DRIVER_OPS,
 
        .name                   = DRIVER_NAME,
        .desc                   = DRIVER_DESC,
@@ -132,7 +114,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
        dev->mode_config.max_height = YRES_MAX;
        dev->mode_config.cursor_width = 512;
        dev->mode_config.cursor_height = 512;
-       dev->mode_config.preferred_depth = 24;
+       dev->mode_config.preferred_depth = 32;
        dev->mode_config.helper_private = &vkms_mode_config_helpers;
 
        return vkms_output_init(vkmsdev, 0);
@@ -184,6 +166,8 @@ static int __init vkms_init(void)
        if (ret)
                goto out_devres;
 
+       drm_fbdev_generic_setup(&vkms_device->drm, 0);
+
        return 0;
 
 out_devres:
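
With the switch to the shmem helpers, vkms drops its hand-rolled fops, dumb_create and prime callbacks in favor of DEFINE_DRM_GEM_FOPS() and DRM_GEM_SHMEM_DRIVER_OPS. A minimal sketch of what such a converted driver struct looks like; my_driver, my_fops and the identity strings are placeholders, not vkms code.

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

DEFINE_DRM_GEM_FOPS(my_fops);

static struct drm_driver my_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
        .fops                   = &my_fops,
        /* cached-mapping object variant, as vkms selects above */
        .gem_create_object      = drm_gem_shmem_create_object_cached,
        DRM_GEM_SHMEM_DRIVER_OPS,       /* dumb_create, prime, mmap, ... */
        .name  = "mydrv",
        .desc  = "illustrative shmem-backed driver",
        .date  = "20201102",
        .major = 1,
        .minor = 0,
};
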
index 380a8f27e15603455530e3389c67ee6d36927f25..5ed91ff08cb326c05ffab3de855d35300578e6ed 100644 (file)
@@ -88,23 +88,12 @@ struct vkms_device {
        struct vkms_output output;
 };
 
-struct vkms_gem_object {
-       struct drm_gem_object gem;
-       struct mutex pages_lock; /* Page lock used in page fault handler */
-       struct page **pages;
-       unsigned int vmap_count;
-       void *vaddr;
-};
-
 #define drm_crtc_to_vkms_output(target) \
        container_of(target, struct vkms_output, crtc)
 
 #define drm_device_to_vkms_device(target) \
        container_of(target, struct vkms_device, drm)
 
-#define drm_gem_to_vkms_gem(target)\
-       container_of(target, struct vkms_gem_object, gem)
-
 #define to_vkms_crtc_state(target)\
        container_of(target, struct vkms_crtc_state, base)
 
@@ -120,24 +109,6 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index);
 struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
                                  enum drm_plane_type type, int index);
 
-/* Gem stuff */
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
-                    struct drm_mode_create_dumb *args);
-
-void vkms_gem_free_object(struct drm_gem_object *obj);
-
-int vkms_gem_vmap(struct drm_gem_object *obj);
-
-void vkms_gem_vunmap(struct drm_gem_object *obj);
-
-/* Prime */
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
-                          struct dma_buf_attachment *attach,
-                          struct sg_table *sg);
-
 /* CRC Support */
 const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
                                        size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
deleted file mode 100644 (file)
index a017fc5..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#include <linux/dma-buf.h>
-#include <linux/shmem_fs.h>
-#include <linux/vmalloc.h>
-#include <drm/drm_prime.h>
-
-#include "vkms_drv.h"
-
-static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
-                                                u64 size)
-{
-       struct vkms_gem_object *obj;
-       int ret;
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (!obj)
-               return ERR_PTR(-ENOMEM);
-
-       size = roundup(size, PAGE_SIZE);
-       ret = drm_gem_object_init(dev, &obj->gem, size);
-       if (ret) {
-               kfree(obj);
-               return ERR_PTR(ret);
-       }
-
-       mutex_init(&obj->pages_lock);
-
-       return obj;
-}
-
-void vkms_gem_free_object(struct drm_gem_object *obj)
-{
-       struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
-                                                  gem);
-
-       WARN_ON(gem->pages);
-       WARN_ON(gem->vaddr);
-
-       mutex_destroy(&gem->pages_lock);
-       drm_gem_object_release(obj);
-       kfree(gem);
-}
-
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       struct vkms_gem_object *obj = vma->vm_private_data;
-       unsigned long vaddr = vmf->address;
-       pgoff_t page_offset;
-       loff_t num_pages;
-       vm_fault_t ret = VM_FAULT_SIGBUS;
-
-       page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-       num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
-
-       if (page_offset > num_pages)
-               return VM_FAULT_SIGBUS;
-
-       mutex_lock(&obj->pages_lock);
-       if (obj->pages) {
-               get_page(obj->pages[page_offset]);
-               vmf->page = obj->pages[page_offset];
-               ret = 0;
-       }
-       mutex_unlock(&obj->pages_lock);
-       if (ret) {
-               struct page *page;
-               struct address_space *mapping;
-
-               mapping = file_inode(obj->gem.filp)->i_mapping;
-               page = shmem_read_mapping_page(mapping, page_offset);
-
-               if (!IS_ERR(page)) {
-                       vmf->page = page;
-                       ret = 0;
-               } else {
-                       switch (PTR_ERR(page)) {
-                       case -ENOSPC:
-                       case -ENOMEM:
-                               ret = VM_FAULT_OOM;
-                               break;
-                       case -EBUSY:
-                               ret = VM_FAULT_RETRY;
-                               break;
-                       case -EFAULT:
-                       case -EINVAL:
-                               ret = VM_FAULT_SIGBUS;
-                               break;
-                       default:
-                               WARN_ON(PTR_ERR(page));
-                               ret = VM_FAULT_SIGBUS;
-                               break;
-                       }
-               }
-       }
-       return ret;
-}
-
-static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
-                                             struct drm_file *file,
-                                             u32 *handle,
-                                             u64 size)
-{
-       struct vkms_gem_object *obj;
-       int ret;
-
-       if (!file || !dev || !handle)
-               return ERR_PTR(-EINVAL);
-
-       obj = __vkms_gem_create(dev, size);
-       if (IS_ERR(obj))
-               return ERR_CAST(obj);
-
-       ret = drm_gem_handle_create(file, &obj->gem, handle);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return &obj->gem;
-}
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
-                    struct drm_mode_create_dumb *args)
-{
-       struct drm_gem_object *gem_obj;
-       u64 pitch, size;
-
-       if (!args || !dev || !file)
-               return -EINVAL;
-
-       pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
-       size = pitch * args->height;
-
-       if (!size)
-               return -EINVAL;
-
-       gem_obj = vkms_gem_create(dev, file, &args->handle, size);
-       if (IS_ERR(gem_obj))
-               return PTR_ERR(gem_obj);
-
-       args->size = gem_obj->size;
-       args->pitch = pitch;
-
-       drm_gem_object_put(gem_obj);
-
-       DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
-
-       return 0;
-}
-
-static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
-{
-       struct drm_gem_object *gem_obj = &vkms_obj->gem;
-
-       if (!vkms_obj->pages) {
-               struct page **pages = drm_gem_get_pages(gem_obj);
-
-               if (IS_ERR(pages))
-                       return pages;
-
-               if (cmpxchg(&vkms_obj->pages, NULL, pages))
-                       drm_gem_put_pages(gem_obj, pages, false, true);
-       }
-
-       return vkms_obj->pages;
-}
-
-void vkms_gem_vunmap(struct drm_gem_object *obj)
-{
-       struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
-
-       mutex_lock(&vkms_obj->pages_lock);
-       if (vkms_obj->vmap_count < 1) {
-               WARN_ON(vkms_obj->vaddr);
-               WARN_ON(vkms_obj->pages);
-               mutex_unlock(&vkms_obj->pages_lock);
-               return;
-       }
-
-       vkms_obj->vmap_count--;
-
-       if (vkms_obj->vmap_count == 0) {
-               vunmap(vkms_obj->vaddr);
-               vkms_obj->vaddr = NULL;
-               drm_gem_put_pages(obj, vkms_obj->pages, false, true);
-               vkms_obj->pages = NULL;
-       }
-
-       mutex_unlock(&vkms_obj->pages_lock);
-}
-
-int vkms_gem_vmap(struct drm_gem_object *obj)
-{
-       struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
-       int ret = 0;
-
-       mutex_lock(&vkms_obj->pages_lock);
-
-       if (!vkms_obj->vaddr) {
-               unsigned int n_pages = obj->size >> PAGE_SHIFT;
-               struct page **pages = _get_pages(vkms_obj);
-
-               if (IS_ERR(pages)) {
-                       ret = PTR_ERR(pages);
-                       goto out;
-               }
-
-               vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
-               if (!vkms_obj->vaddr)
-                       goto err_vmap;
-       }
-
-       vkms_obj->vmap_count++;
-       goto out;
-
-err_vmap:
-       ret = -ENOMEM;
-       drm_gem_put_pages(obj, vkms_obj->pages, false, true);
-       vkms_obj->pages = NULL;
-out:
-       mutex_unlock(&vkms_obj->pages_lock);
-       return ret;
-}
-
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
-                          struct dma_buf_attachment *attach,
-                          struct sg_table *sg)
-{
-       struct vkms_gem_object *obj;
-       int npages;
-
-       obj = __vkms_gem_create(dev, attach->dmabuf->size);
-       if (IS_ERR(obj))
-               return ERR_CAST(obj);
-
-       npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
-       DRM_DEBUG_PRIME("Importing %d pages\n", npages);
-
-       obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-       if (!obj->pages) {
-               vkms_gem_free_object(&obj->gem);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-       return &obj->gem;
-}
index 6d31265a2ab7be62697066e6a93529dd389836eb..9890137bcb8d2c9162ae149b4ad8c07edec71920 100644 (file)
@@ -5,6 +5,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
 
 #include "vkms_drv.h"
 
@@ -145,15 +146,15 @@ static int vkms_prepare_fb(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
        struct drm_gem_object *gem_obj;
-       int ret;
+       void *vaddr;
 
        if (!state->fb)
                return 0;
 
        gem_obj = drm_gem_fb_get_obj(state->fb, 0);
-       ret = vkms_gem_vmap(gem_obj);
-       if (ret)
-               DRM_ERROR("vmap failed: %d\n", ret);
+       vaddr = drm_gem_shmem_vmap(gem_obj);
+       if (IS_ERR(vaddr))
+               DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
 
        return drm_gem_fb_prepare_fb(plane, state);
 }
@@ -162,12 +163,14 @@ static void vkms_cleanup_fb(struct drm_plane *plane,
                            struct drm_plane_state *old_state)
 {
        struct drm_gem_object *gem_obj;
+       struct drm_gem_shmem_object *shmem_obj;
 
        if (!old_state->fb)
                return;
 
        gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
-       vkms_gem_vunmap(gem_obj);
+       shmem_obj = to_drm_gem_shmem_obj(gem_obj);
+       drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr);
 }
 
 static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
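
prepare_fb/cleanup_fb above rely on drm_gem_shmem_vmap() and drm_gem_shmem_vunmap() being strictly paired: the pointer (or ERR_PTR) returned by vmap is what vunmap later consumes. A small illustrative pair, assuming the two-argument vunmap signature used in this diff; the my_ wrappers are hypothetical.

#include <linux/err.h>
#include <drm/drm_gem_shmem_helper.h>

static void *my_map_fb(struct drm_gem_object *obj)
{
        void *vaddr = drm_gem_shmem_vmap(obj);  /* ERR_PTR on failure */

        return IS_ERR(vaddr) ? NULL : vaddr;
}

static void my_unmap_fb(struct drm_gem_object *obj, void *vaddr)
{
        if (vaddr)
                drm_gem_shmem_vunmap(obj, vaddr); /* same vaddr back */
}
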
index 094fa4aa061d078d575828756bd402ef559ad051..26b903926872efcc5349eecd8faf490279486e02 100644 (file)
@@ -6,6 +6,7 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
 
 static const u32 vkms_wb_formats[] = {
        DRM_FORMAT_XRGB8888,
@@ -63,22 +64,20 @@ static int vkms_wb_connector_get_modes(struct drm_connector *connector)
 static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
                               struct drm_writeback_job *job)
 {
-       struct vkms_gem_object *vkms_obj;
        struct drm_gem_object *gem_obj;
-       int ret;
+       void *vaddr;
 
        if (!job->fb)
                return 0;
 
        gem_obj = drm_gem_fb_get_obj(job->fb, 0);
-       ret = vkms_gem_vmap(gem_obj);
-       if (ret) {
-               DRM_ERROR("vmap failed: %d\n", ret);
-               return ret;
+       vaddr = drm_gem_shmem_vmap(gem_obj);
+       if (IS_ERR(vaddr)) {
+               DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
+               return PTR_ERR(vaddr);
        }
 
-       vkms_obj = drm_gem_to_vkms_gem(gem_obj);
-       job->priv = vkms_obj->vaddr;
+       job->priv = vaddr;
 
        return 0;
 }
@@ -93,7 +92,7 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
                return;
 
        gem_obj = drm_gem_fb_get_obj(job->fb, 0);
-       vkms_gem_vunmap(gem_obj);
+       drm_gem_shmem_vunmap(gem_obj, job->priv);
 
        vkmsdev = drm_device_to_vkms_device(gem_obj->dev);
        vkms_set_composer(&vkmsdev->output, false);
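
The writeback conversion above stashes the vmap address in job->priv so the cleanup callback can unmap without reaching into driver-private GEM state. A condensed sketch of that prepare/cleanup pairing; the my_wb_* names are hypothetical.

#include <linux/err.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_writeback.h>

static int my_wb_prepare(struct drm_writeback_job *job)
{
        struct drm_gem_object *obj;
        void *vaddr;

        if (!job->fb)
                return 0;

        obj = drm_gem_fb_get_obj(job->fb, 0);
        vaddr = drm_gem_shmem_vmap(obj);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        job->priv = vaddr;      /* carried to the cleanup callback */
        return 0;
}

static void my_wb_cleanup(struct drm_writeback_job *job)
{
        if (!job->fb)
                return;

        drm_gem_shmem_vunmap(drm_gem_fb_get_obj(job->fb, 0), job->priv);
}
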
index e8d66182cd7b553ad7da9c588681838f69fb9383..f21881e087dbb53678a9165124a584beab6e04f4 100644 (file)
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
        int ret = 0;
 
        /* Buffer objects need to be either pinned or reserved: */
-       if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+       if (!(dst->pin_count))
                dma_resv_assert_held(dst->base.resv);
-       if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+       if (!(src->pin_count))
                dma_resv_assert_held(src->base.resv);
 
        if (!ttm_tt_is_populated(dst->ttm)) {
@@ -484,8 +484,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
        d.src_pages = src->ttm->pages;
        d.dst_num_pages = dst->num_pages;
        d.src_num_pages = src->num_pages;
-       d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
-       d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+       d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
+       d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
        d.diff = diff;
 
        for (j = 0; j < h; ++j) {
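
From here on the vmwgfx changes track TTM's removal of TTM_PL_FLAG_NO_EVICT in favor of an explicit pin_count on struct ttm_buffer_object. A short sketch of the resulting invariants; ttm_bo_pin()/ttm_bo_unpin() are the real TTM helpers, the my_* wrappers are illustrative only.

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Pages may only be touched while the BO is pinned or reserved. */
static void my_assert_bo_usable(struct ttm_buffer_object *bo)
{
        if (!bo->pin_count)
                dma_resv_assert_held(bo->base.resv);
}

/* Toggling the pin itself requires the reservation to be held. */
static void my_set_pin(struct ttm_buffer_object *bo, bool pin)
{
        dma_resv_assert_held(bo->base.resv);

        if (pin)
                ttm_bo_pin(bo);         /* ++pin_count */
        else
                ttm_bo_unpin(bo);       /* --pin_count, may become evictable */
}
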
index 813f1b14809415fa78eae62128677dc2832ca485..263d76ae43f02d870a0670d57fc4b7efab908088 100644 (file)
@@ -106,7 +106,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       if (buf->pin_count > 0)
+       if (buf->base.pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
@@ -155,7 +155,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       if (buf->pin_count > 0) {
+       if (buf->base.pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
                goto out_unreserve;
@@ -246,12 +246,12 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
-           buf->pin_count == 0) {
+           buf->base.pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }
 
-       if (buf->pin_count > 0)
+       if (buf->base.pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
@@ -343,23 +343,13 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 
        dma_resv_assert_held(bo->base.resv);
 
-       if (pin) {
-               if (vbo->pin_count++ > 0)
-                       return;
-       } else {
-               WARN_ON(vbo->pin_count <= 0);
-               if (--vbo->pin_count > 0)
-                       return;
-       }
+       if (pin == !!bo->pin_count)
+               return;
 
        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.mem_type = bo->mem.mem_type;
        pl.flags = bo->mem.placement;
-       if (pin)
-               pl.flags |= TTM_PL_FLAG_NO_EVICT;
-       else
-               pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
 
        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
@@ -368,8 +358,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
        ret = ttm_bo_validate(bo, &placement, &ctx);
 
        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
 
+       if (pin)
+               ttm_bo_pin(bo);
+       else
+               ttm_bo_unpin(bo);
+}
 
 /**
  * vmw_bo_map_and_cache - Map a buffer object and cache the map
@@ -487,6 +481,49 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
        ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
+/**
+ * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @size: size of the BO we need
+ * @placement: where to put it
+ * @p_bo: resulting BO
+ *
+ * Creates and pins a simple BO for in-kernel use.
+ */
+int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
+                        struct ttm_placement *placement,
+                        struct ttm_buffer_object **p_bo)
+{
+       unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       struct ttm_operation_ctx ctx = { false, false };
+       struct ttm_buffer_object *bo;
+       size_t acc_size;
+       int ret;
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (unlikely(!bo))
+               return -ENOMEM;
+
+       acc_size = ttm_round_pot(sizeof(*bo));
+       acc_size += ttm_round_pot(npages * sizeof(void *));
+       acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+       ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
+                                  ttm_bo_type_device, placement, 0,
+                                  &ctx, acc_size, NULL, NULL, NULL);
+       if (unlikely(ret))
+               goto error_free;
+
+       ttm_bo_pin(bo);
+       ttm_bo_unreserve(bo);
+       *p_bo = bo;
+
+       return 0;
+
+error_free:
+       kfree(bo);
+       return ret;
+}
 
 /**
  * vmw_bo_init - Initialize a vmw buffer object
@@ -496,6 +533,7 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
  * @size: Buffer object size in bytes.
  * @placement: Initial placement.
  * @interruptible: Whether waits should be performed interruptible.
+ * @pin: Whether the BO should be created pinned at a fixed location.
  * @bo_free: The buffer object destructor.
  * Returns: Zero on success, negative error code on error.
  *
@@ -504,9 +542,10 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
-               bool interruptible,
+               bool interruptible, bool pin,
                void (*bo_free)(struct ttm_buffer_object *bo))
 {
+       struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
@@ -520,11 +559,16 @@ int vmw_bo_init(struct vmw_private *dev_priv,
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;
 
-       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-                         ttm_bo_type_device, placement,
-                         0, interruptible, acc_size,
-                         NULL, NULL, bo_free);
-       return ret;
+       ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
+                                  ttm_bo_type_device, placement,
+                                  0, &ctx, acc_size, NULL, NULL, bo_free);
+       if (unlikely(ret))
+               return ret;
+
+       if (pin)
+               ttm_bo_pin(&vmw_bo->base);
+       ttm_bo_unreserve(&vmw_bo->base);
+       return 0;
 }
 
 
@@ -613,7 +657,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
-                         &vmw_vram_sys_placement, true,
+                         &vmw_vram_sys_placement, true, false,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;
@@ -1148,9 +1192,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 {
        struct vmw_buffer_object *vbo;
 
-       if (mem == NULL)
-               return;
-
        /* Make sure @bo is embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
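
vmw_bo_init() now goes through ttm_bo_init_reserved(), which hands the BO back still reserved, so a pin can be applied race-free before unreserving. A stripped-down sketch of that sequence under the v5.11-era TTM API; error unwinding is elided and my_bo_init_pinned is hypothetical.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>

static int my_bo_init_pinned(struct ttm_bo_device *bdev,
                             struct ttm_buffer_object *bo, size_t size,
                             struct ttm_placement *placement, bool pin)
{
        struct ttm_operation_ctx ctx = { false, false };
        size_t acc_size = ttm_round_pot(sizeof(*bo));
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, ttm_bo_type_device,
                                   placement, 0, &ctx, acc_size,
                                   NULL, NULL, NULL);
        if (ret)
                return ret;     /* unwinding elided; see driver code above */

        if (pin)
                ttm_bo_pin(bo);         /* legal: @bo is still reserved */
        ttm_bo_unreserve(bo);
        return 0;
}
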
index 3b41cf63110ad9e5ef8b432a5b9f4f156af770ff..9a9fe10d829b892c8407bbe755a84c9b70210c3b 100644 (file)
@@ -1245,9 +1245,9 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                    !dev_priv->has_mob)
                        return -ENOMEM;
 
-               ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
-                                   &vmw_mob_ne_placement, 0, false,
-                                   &man->cmd_space);
+               ret = vmw_bo_create_kernel(dev_priv, size,
+                                          &vmw_mob_placement,
+                                          &man->cmd_space);
                if (ret)
                        return ret;
 
index 65e8e7a977246ad5a7fa9abecc1100bb1c683471..984d8884357d9088565d1ee1e506ebfc11ee42bf 100644 (file)
@@ -410,8 +410,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        if (!buf)
                return -ENOMEM;
 
-       ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-                         true, vmw_bo_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
+                         true, true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
index 31e3e5c9f362238c89ae3eca99c119ebaf1290fb..b3a60959b5d5b0cfc6240086a64cf5dcc334cd7b 100644 (file)
@@ -372,7 +372,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
                return -ENOMEM;
 
        ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
-                         &vmw_sys_ne_placement, false,
+                         &vmw_sys_placement, false, true,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;
@@ -468,7 +468,10 @@ out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
-               (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+               struct ttm_resource_manager *man;
+
+               man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+               ttm_resource_manager_evict_all(&dev_priv->bdev, man);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
@@ -501,7 +504,10 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
 
        if (dev_priv->has_mob) {
-               ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+               struct ttm_resource_manager *man;
+
+               man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+               ttm_resource_manager_evict_all(&dev_priv->bdev, man);
                vmw_otables_takedown(dev_priv);
        }
 }
@@ -589,10 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
        else
                dev_priv->map_mode = vmw_dma_map_populate;
 
-        if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
-           (dev_priv->map_mode == vmw_dma_alloc_coherent))
-               return -EINVAL;
-
        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
        return 0;
 }
@@ -872,10 +874,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        drm_vma_offset_manager_init(&dev_priv->vma_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
-       ret = ttm_bo_device_init(&dev_priv->bdev,
-                                &vmw_bo_driver,
+       ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
+                                dev_priv->dev->dev,
                                 dev->anon_inode->i_mapping,
                                 &dev_priv->vma_manager,
+                                dev_priv->map_mode == vmw_dma_alloc_coherent,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -1257,7 +1260,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
        if (ttm_resource_manager_used(man)) {
                ttm_resource_manager_set_used(man, false);
                spin_unlock(&dev_priv->svga_lock);
-               if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+               if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
                        DRM_ERROR("Failed evicting VRAM buffers.\n");
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
@@ -1364,6 +1367,10 @@ static int vmw_pm_freeze(struct device *kdev)
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
        int ret;
 
        /*
@@ -1384,7 +1391,7 @@ static int vmw_pm_freeze(struct device *kdev)
        vmw_execbuf_release_pinned_bo(dev_priv);
        vmw_resource_evict_all(dev_priv);
        vmw_release_device_early(dev_priv);
-       ttm_bo_swapout_all();
+       while (ttm_bo_swapout(&ctx) == 0);
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);
        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
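
ttm_bo_evict_mm() is gone; callers now resolve the per-domain resource manager and evict through it, as the vmwgfx_drv.c hunks above do for VMW_PL_MOB and TTM_PL_VRAM. A small sketch of the lookup-then-evict idiom; my_evict_domain is hypothetical.

#include <drm/ttm/ttm_bo_driver.h>

static int my_evict_domain(struct ttm_bo_device *bdev, unsigned int mem_type)
{
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);

        if (!man)
                return 0;       /* domain was never initialized */

        return ttm_resource_manager_evict_all(bdev, man);
}
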
index 1523b51a7284c03928195c67063db5ab96762f8d..b45becbb00f8e2e1b8148379eb7089d74c30e8da 100644 (file)
@@ -99,7 +99,6 @@ struct vmw_fpriv {
  * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
  * @base: The TTM buffer object
  * @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @pin_count: pin depth
  * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
  * increased. May be decreased without reservation.
  * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -110,7 +109,6 @@ struct vmw_fpriv {
 struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct rb_root res_tree;
-       s32 pin_count;
        atomic_t cpu_writers;
        /* Not ref-counted.  Protected by binding_mutex */
        struct vmw_resource *dx_query_ctx;
@@ -845,10 +843,14 @@ extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
 extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
 extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
+                               unsigned long size,
+                               struct ttm_placement *placement,
+                               struct ttm_buffer_object **p_bo);
 extern int vmw_bo_init(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *vmw_bo,
                       size_t size, struct ttm_placement *placement,
-                      bool interruptible,
+                      bool interruptible, bool pin,
                       void (*bo_free)(struct ttm_buffer_object *bo));
 extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                     struct ttm_object_file *tfile);
@@ -1005,16 +1007,12 @@ extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
 
 extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_placement vmw_nonfixed_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern const struct vmw_sg_table *
index c59806d40e15bc2df5814b49566da3c2b6dcbb96..4d60201037d150c211d610ea41ddd573ada98185 100644 (file)
@@ -406,7 +406,7 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 
        ret = vmw_bo_init(vmw_priv, vmw_bo, size,
                              &vmw_sys_placement,
-                             false,
+                             false, false,
                              &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */
index 551042489036b6fcba8c729f0bc077c19058ca79..be325a62c1781c17329fd6fbccafb12b859999e1 100644 (file)
@@ -143,7 +143,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
 
        ttm_resource_manager_set_used(man, false);
 
-       ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+       ttm_resource_manager_evict_all(&dev_priv->bdev, man);
 
        ttm_resource_manager_cleanup(man);
 
index c4017c7a24db698b6830bc7d2c931972947b9126..9d1de5b5cc6a7f42f97a00b1e1ea9695fbf4d5ad 100644 (file)
@@ -214,7 +214,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
  * CRTC, it makes more sense to do those at plane update time.
  */
 static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
 }
 
@@ -224,7 +224,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
  * @crtc: CRTC to be turned off
  */
 static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
 }
 
index d4d66532f9c904b2231c8da6a67653c9f72a9b69..0b76b3d17d4ce448cdb6a84c621d4f3079413623 100644 (file)
@@ -206,7 +206,7 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
  * @start: First page of the range within the buffer object.
  * @end: Last page of the range within the buffer object + 1.
  *
- * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
  */
 void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
                        pgoff_t start, pgoff_t end)
index c0f156078ddae5688280a391c133f9d5eb828d14..00b535831a7a7767b36ded6f6d6957f8b18dc6ee 100644 (file)
@@ -370,7 +370,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 
        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
-                             interruptible,
+                             interruptible, false,
                              &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;
@@ -867,7 +867,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
        mutex_lock(&dev_priv->binding_mutex);
 
        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
-       if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+       if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }
@@ -1002,7 +1002,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                        vbo = res->backup;
 
                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
-                       if (!vbo->pin_count) {
+                       if (!vbo->base.pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
index 4bf0f5ec4fc2d94663970d4cf8e010cf5c069933..4bdad2f2d13089bba2c2e56392f3bca3974d4011 100644 (file)
@@ -279,7 +279,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
  * This is called after a mode set has been completed.
  */
 static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
 {
 }
 
@@ -289,7 +289,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
  * @crtc: CRTC to be turned off
  */
 static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
@@ -451,8 +451,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_bo_init(dev_priv, vps->bo, size,
-                             &vmw_vram_ne_placement,
-                             false, &vmw_bo_bo_free);
+                             &vmw_vram_placement,
+                             false, true, &vmw_bo_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (ret) {
                vps->bo = NULL; /* vmw_bo_init frees on error */
index e139fdfd16356920c7eaee9ecae623e185f55ed1..f328aa5839a222e024dd76a2da6efe3cdda8af55 100644 (file)
@@ -978,8 +978,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        if (unlikely(!buf))
                return -ENOMEM;
 
-       ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-                             true, vmw_bo_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
+                             true, true, vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out;
 
index cf3aafd00837c634b24dfebe12487852ced84431..5b04ec047ef36c23d0cdb430d25fa405927376bf 100644 (file)
@@ -408,12 +408,12 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
 }
 
 static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
 {
 }
 
 static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
-                                        struct drm_crtc_state *old_state)
+                                        struct drm_atomic_state *state)
 {
        struct vmw_private *dev_priv;
        struct vmw_screen_target_display_unit *stdu;
index c8427998fa35b333390462eab109db5e166cece6..155ca3a5c7e55400596bec864bb0cc1d0ba5aebc 100644 (file)
@@ -152,7 +152,7 @@ void vmw_thp_fini(struct vmw_private *dev_priv)
 
        ttm_resource_manager_set_used(man, false);
 
-       ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+       ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
        if (ret)
                return;
        spin_lock(&rman->lock);
index 73116ec70ba59547a18ed9e4478157c1d8c7fadd..51f70bea41cce4d461bbf2a042cee9ee91b7de11 100644 (file)
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_VRAM,
-       .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place vram_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = TTM_PL_VRAM,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
 };
 
 static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_SYSTEM,
-       .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place sys_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = TTM_PL_SYSTEM,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
 };
 
 static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_GMR,
-       .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place gmr_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = VMW_PL_GMR,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
 };
 
 static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_MOB,
-       .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place mob_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = VMW_PL_MOB,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
 };
 
 struct ttm_placement vmw_vram_placement = {
@@ -98,12 +69,12 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
 };
 
@@ -112,12 +83,12 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
 };
 
@@ -128,29 +99,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
        .busy_placement = &gmr_placement_flags
 };
 
-static const struct ttm_place vram_gmr_ne_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }
-};
-
-struct ttm_placement vmw_vram_gmr_ne_placement = {
-       .num_placement = 2,
-       .placement = vram_gmr_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &gmr_ne_placement_flags
-};
-
 struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
@@ -158,13 +106,6 @@ struct ttm_placement vmw_vram_sys_placement = {
        .busy_placement = &sys_placement_flags
 };
 
-struct ttm_placement vmw_vram_ne_placement = {
-       .num_placement = 1,
-       .placement = &vram_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &vram_ne_placement_flags
-};
-
 struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
@@ -172,34 +113,27 @@ struct ttm_placement vmw_sys_placement = {
        .busy_placement = &sys_placement_flags
 };
 
-struct ttm_placement vmw_sys_ne_placement = {
-       .num_placement = 1,
-       .placement = &sys_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_ne_placement_flags
-};
-
 static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
 };
 
@@ -208,17 +142,17 @@ static const struct ttm_place nonfixed_placement_flags[] = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
 };
 
@@ -243,13 +177,6 @@ struct ttm_placement vmw_mob_placement = {
        .busy_placement = &mob_placement_flags
 };
 
-struct ttm_placement vmw_mob_ne_placement = {
-       .num_placement = 1,
-       .num_busy_placement = 1,
-       .placement = &mob_ne_placement_flags,
-       .busy_placement = &mob_ne_placement_flags
-};
-
 struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
@@ -258,7 +185,7 @@ struct ttm_placement vmw_nonfixed_placement = {
 };
 
 struct vmw_ttm_tt {
-       struct ttm_dma_tt dma_ttm;
+       struct ttm_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
@@ -438,8 +365,8 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
                return 0;
 
        vsgt->mode = dev_priv->map_mode;
-       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+       vsgt->pages = vmw_tt->dma_ttm.pages;
+       vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;
 
@@ -549,7 +476,7 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
 {
        struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 
        return &vmw_tt->vsgt;
 }
@@ -559,7 +486,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        int ret = 0;
 
        if (!bo_mem)
@@ -603,7 +530,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
                           struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
        if (!vmw_be->bound)
                return;
@@ -628,13 +555,13 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
 static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
        vmw_ttm_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ttm_dma_tt_fini(&vmw_be->dma_ttm);
+               ttm_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);
 
@@ -648,40 +575,18 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_bo_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       struct vmw_ttm_tt *vmw_tt =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-       int ret;
-
+       /* TODO: maybe drop this completely? */
        if (ttm_tt_is_populated(ttm))
                return 0;
 
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-               size_t size =
-                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-               ret = ttm_mem_global_alloc(glob, size, ctx);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
-                                       ctx);
-               if (unlikely(ret != 0))
-                       ttm_mem_global_free(glob, size);
-       } else
-               ret = ttm_pool_populate(ttm, ctx);
-
-       return ret;
+       return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }
 
 static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
                               struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-                                                dma_ttm.ttm);
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-
+                                                dma_ttm);
 
        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
@@ -689,14 +594,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
        }
 
        vmw_ttm_unmap_dma(vmw_tt);
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-               size_t size =
-                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-
-               ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
-               ttm_mem_global_free(glob, size);
-       } else
-               ttm_pool_unpopulate(ttm);
+       ttm_pool_free(&bdev->pool, ttm);
 }
 
 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -713,13 +611,15 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
        vmw_be->mob = NULL;
 
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+                                     ttm_cached);
        else
-               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+               ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+                                 ttm_cached);
        if (unlikely(ret != 0))
                goto out_no_init;
 
-       return &vmw_be->dma_ttm.ttm;
+       return &vmw_be->dma_ttm;
 out_no_init:
        kfree(vmw_be);
        return NULL;
@@ -752,6 +652,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        dev_priv->vram_start;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_cached;
                break;
        default:
                return -EINVAL;
@@ -773,6 +674,8 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_resource *mem)
 {
+       if (!mem)
+               return;
        vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
 }
@@ -789,19 +692,65 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
        (void) ttm_bo_wait(bo, false, false);
 }
 
+static int vmw_move(struct ttm_buffer_object *bo,
+                   bool evict,
+                   struct ttm_operation_ctx *ctx,
+                   struct ttm_resource *new_mem)
+{
+       struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+       struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+       int ret;
+
+       if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+               ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
+               if (ret)
+                       return ret;
+       }
+
+       vmw_move_notify(bo, evict, new_mem);
+
+       if (old_man->use_tt && new_man->use_tt) {
+               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+                       ttm_bo_assign_mem(bo, new_mem);
+                       return 0;
+               }
+               ret = ttm_bo_wait_ctx(bo, ctx);
+               if (ret)
+                       goto fail;
+
+               vmw_ttm_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_mem);
+               return 0;
+       } else {
+               ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+               if (ret)
+                       goto fail;
+       }
+       return 0;
+fail:
+       swap(*new_mem, bo->mem);
+       vmw_move_notify(bo, false, new_mem);
+       swap(*new_mem, bo->mem);
+       return ret;
+}
+
+static void
+vmw_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+       vmw_move_notify(bo, false, NULL);
+}
 
 struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
-       .ttm_tt_bind = &vmw_ttm_bind,
-       .ttm_tt_unbind = &vmw_ttm_unbind,
        .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
-       .move = NULL,
+       .move = vmw_move,
        .verify_access = vmw_verify_access,
-       .move_notify = vmw_move_notify,
+       .delete_mem_notify = vmw_delete_mem_notify,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
 };
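
Note on the TTM API change above: binding and unbinding are no longer standalone driver hooks (ttm_tt_bind/ttm_tt_unbind are gone) but are driven from the driver's .move callback. A minimal sketch of the new contract, for a hypothetical driver that only moves between system-memory placements, using the same helpers vmw_move() uses:

static int example_move(struct ttm_buffer_object *bo, bool evict,
			struct ttm_operation_ctx *ctx,
			struct ttm_resource *new_mem)
{
	int ret;

	/* Idle any outstanding GPU access before touching the backing store. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	/* Adopt the new placement, as vmw_move() does above. */
	ttm_bo_assign_mem(bo, new_mem);
	return 0;
}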
@@ -817,11 +766,9 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
        struct ttm_buffer_object *bo;
        int ret;
 
-       ret = ttm_bo_create(&dev_priv->bdev, bo_size,
-                           ttm_bo_type_device,
-                           &vmw_sys_ne_placement,
-                           0, false, &bo);
-
+       ret = vmw_bo_create_kernel(dev_priv, bo_size,
+                                  &vmw_sys_placement,
+                                  &bo);
        if (unlikely(ret != 0))
                return ret;
 
@@ -830,7 +777,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
-                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }
 
index e69bc373ae2e54db4cd5e765a5fe94628240a988..f2e2bf6d1421fd8751348e6525ef5ea3411fdd6b 100644 (file)
@@ -540,7 +540,7 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
        if (atomic_read(&vbo->cpu_writers))
                return -EBUSY;
 
-       if (vbo->pin_count > 0)
+       if (vbo->base.pin_count > 0)
                return 0;
 
        if (validate_as_mob)
index cc93a8c9547bcc76814294713ae1c8910b30ca75..98b6d2ba088ac1f26df629833e7e70fb51082488 100644 (file)
@@ -381,6 +381,23 @@ void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
                                        fb_cookie);
 }
 
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
+{
+       struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+       int idx;
+
+       if (drm_dev_enter(obj->dev, &idx)) {
+               xen_drm_front_dbuf_destroy(drm_info->front_info,
+                                          xen_drm_front_dbuf_to_cookie(obj));
+               drm_dev_exit(idx);
+       } else {
+               dbuf_free(&drm_info->front_info->dbuf_list,
+                         xen_drm_front_dbuf_to_cookie(obj));
+       }
+
+       xen_drm_front_gem_free_object_unlocked(obj);
+}
+
 static int xen_drm_drv_dumb_create(struct drm_file *filp,
                                   struct drm_device *dev,
                                   struct drm_mode_create_dumb *args)
@@ -435,23 +452,6 @@ fail:
        return ret;
 }
 
-static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
-{
-       struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
-       int idx;
-
-       if (drm_dev_enter(obj->dev, &idx)) {
-               xen_drm_front_dbuf_destroy(drm_info->front_info,
-                                          xen_drm_front_dbuf_to_cookie(obj));
-               drm_dev_exit(idx);
-       } else {
-               dbuf_free(&drm_info->front_info->dbuf_list,
-                         xen_drm_front_dbuf_to_cookie(obj));
-       }
-
-       xen_drm_front_gem_free_object_unlocked(obj);
-}
-
 static void xen_drm_drv_release(struct drm_device *dev)
 {
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -483,22 +483,12 @@ static const struct file_operations xen_drm_dev_fops = {
        .mmap           = xen_drm_front_gem_mmap,
 };
 
-static const struct vm_operations_struct xen_drm_drv_vm_ops = {
-       .open           = drm_gem_vm_open,
-       .close          = drm_gem_vm_close,
-};
-
 static struct drm_driver xen_drm_driver = {
        .driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .release                   = xen_drm_drv_release,
-       .gem_vm_ops                = &xen_drm_drv_vm_ops,
-       .gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
-       .gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
-       .gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
-       .gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
        .gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
        .dumb_create               = xen_drm_drv_dumb_create,
        .fops                      = &xen_drm_dev_fops,
index 54486d89650e1ed2e319e8f6c84c0f0bea23a70c..cefafe859aba3f7c628a9a6b59fea4b856af14fd 100644 (file)
@@ -160,4 +160,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
 void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
                                 int conn_idx, u64 fb_cookie);
 
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj);
+
 #endif /* __XEN_DRM_FRONT_H_ */
index 2f464ef2d53e0a802ef04cbc2ad6d37eee796a58..4f34ef34ba60601e0657153dfa02ba8682bf353e 100644 (file)
@@ -57,6 +57,19 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj)
        xen_obj->pages = NULL;
 }
 
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+       .open           = drm_gem_vm_open,
+       .close          = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
+       .free = xen_drm_front_gem_object_free,
+       .get_sg_table = xen_drm_front_gem_get_sg_table,
+       .vmap = xen_drm_front_gem_prime_vmap,
+       .vunmap = xen_drm_front_gem_prime_vunmap,
+       .vm_ops = &xen_drm_drv_vm_ops,
+};
+
 static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
                                             size_t size)
 {
@@ -67,6 +80,8 @@ static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
        if (!xen_obj)
                return ERR_PTR(-ENOMEM);
 
+       xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;
+
        ret = drm_gem_object_init(dev, &xen_obj->base, size);
        if (ret < 0) {
                kfree(xen_obj);
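
The hunk above shows the migration pattern that recurs throughout this series: GEM callbacks move off struct drm_driver and onto each object via struct drm_gem_object_funcs, assigned before drm_gem_object_init(). A condensed sketch with hypothetical example_* names:

#include <linux/slab.h>
#include <drm/drm_gem.h>

static void example_gem_free(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);	/* was drm_driver.gem_free_object_unlocked */
	kfree(obj);
}

static const struct drm_gem_object_funcs example_gem_funcs = {
	.free = example_gem_free,
};

static int example_gem_init(struct drm_device *dev,
			    struct drm_gem_object *obj, size_t size)
{
	obj->funcs = &example_gem_funcs;	/* set before any handle can exist */
	return drm_gem_object_init(dev, obj, size);
}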
index 98bd48f13fd11461953c397ad7db2a8142bd0cb9..5802752860dd2c7c21ada318617070429233da3f 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/dmaengine.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/spinlock.h>
@@ -1316,8 +1315,7 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
 
                snprintf(dma_channel_name, sizeof(dma_channel_name),
                         "%s%u", dma_names[layer->id], i);
-               dma->chan = of_dma_request_slave_channel(disp->dev->of_node,
-                                                        dma_channel_name);
+               dma->chan = dma_request_chan(disp->dev, dma_channel_name);
                if (IS_ERR(dma->chan)) {
                        dev_err(disp->dev, "failed to request dma channel\n");
                        ret = PTR_ERR(dma->chan);
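
dma_request_chan() resolves a named channel through the device (device tree or ACPI), whereas of_dma_request_slave_channel() is DT-only; it also reports failures as an ERR_PTR, including -EPROBE_DEFER. A sketch with a hypothetical channel name:

#include <linux/dmaengine.h>

static struct dma_chan *example_request_rx(struct device *dev)
{
	/* Returns a channel or ERR_PTR(); callers should propagate
	 * -EPROBE_DEFER rather than treat it as a hard failure. */
	return dma_request_chan(dev, "rx");
}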
@@ -1449,7 +1447,7 @@ static int zynqmp_disp_crtc_setup_clock(struct drm_crtc *crtc,
 
 static void
 zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
-                              struct drm_crtc_state *old_crtc_state)
+                              struct drm_atomic_state *state)
 {
        struct zynqmp_disp *disp = crtc_to_disp(crtc);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
@@ -1480,8 +1478,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
 
 static void
 zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
-                               struct drm_crtc_state *old_crtc_state)
+                               struct drm_atomic_state *state)
 {
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
        struct zynqmp_disp *disp = crtc_to_disp(crtc);
        struct drm_plane_state *old_plane_state;
 
index 8e69303aad3f72cbf1d358f1277e277ad177864e..f3ffc3703a0ec9081776a4956a7af596f5d9db47 100644 (file)
@@ -80,19 +80,7 @@ static struct drm_driver zynqmp_dpsub_drm_driver = {
        .driver_features                = DRIVER_MODESET | DRIVER_GEM |
                                          DRIVER_ATOMIC,
 
-       .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
-       .gem_prime_export               = drm_gem_prime_export,
-       .gem_prime_import               = drm_gem_prime_import,
-       .gem_prime_get_sg_table         = drm_gem_cma_prime_get_sg_table,
-       .gem_prime_import_sg_table      = drm_gem_cma_prime_import_sg_table,
-       .gem_prime_vmap                 = drm_gem_cma_prime_vmap,
-       .gem_prime_vunmap               = drm_gem_cma_prime_vunmap,
-       .gem_prime_mmap                 = drm_gem_cma_prime_mmap,
-       .gem_free_object_unlocked       = drm_gem_cma_free_object,
-       .gem_vm_ops                     = &drm_gem_cma_vm_ops,
-       .dumb_create                    = zynqmp_dpsub_dumb_create,
-       .dumb_destroy                   = drm_gem_dumb_destroy,
+       DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
 
        .fops                           = &zynqmp_dpsub_drm_fops,
 
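
DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() bundles the boilerplate deleted above; the vmap/vunmap/free hooks now come from the CMA helper's per-object drm_gem_object_funcs rather than struct drm_driver. In the helper header of this era the macro expands to roughly the following (a sketch, not a verbatim copy):

#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
	.dumb_create		   = (dumb_create_func), \
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
	.gem_prime_mmap		   = drm_gem_prime_mmap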
index 5259ff2825f940693eb4f44bd02e11e2ebb161bf..d2a529eba3c9999ce1912eedef627e5b33f8ff19 100644 (file)
@@ -350,7 +350,7 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
 }
 
 static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
-                                 struct drm_crtc_state *old_state)
+                                 struct drm_atomic_state *state)
 {
        struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -455,7 +455,7 @@ static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
 {
        struct zx_crtc *zcrtc = to_zx_crtc(crtc);
        const struct zx_crtc_bits *bits = zcrtc->bits;
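
Both the zynqmp and zx conversions above follow the same helper-API change: CRTC atomic_enable/atomic_disable now receive the whole struct drm_atomic_state, and the per-CRTC state is looked up on demand. The idiom in isolation (hypothetical driver):

#include <drm/drm_atomic.h>

static void example_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* Tear down the pipe using old_state, e.g. old_state->adjusted_mode. */
}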
index 087304b1a5d71bcca55c4ba3b079d5d2b88bcfc6..1401fd52f37a6cab5de214ffa04ebc286ddb628e 100644 (file)
@@ -1034,17 +1034,12 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
 static int vga_switcheroo_runtime_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       int ret;
 
        mutex_lock(&vgasr_mutex);
        vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
        mutex_unlock(&vgasr_mutex);
        pci_wakeup_bus(pdev->bus);
-       ret = dev->bus->pm->runtime_resume(dev);
-       if (ret)
-               return ret;
-
-       return 0;
+       return dev->bus->pm->runtime_resume(dev);
 }
 
 /**
index a7a9bc08dcd115c39633f7661303e4a21e0d23b2..bcfbd0e44a4a07fb40b27aeb422b09a08adcfd63 100644 (file)
@@ -417,7 +417,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
 
-       if (prot & IOMMU_CACHE)
+       /*
+        * Also Mali has its own notions of shareability wherein its Inner
+        * domain covers the cores within the GPU, and its Outer domain is
+        * "outside the GPU" (i.e. either the Inner or System domain in CPU
+        * terms, depending on coherency).
+        */
+       if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
                pte |= ARM_LPAE_PTE_SH_IS;
        else
                pte |= ARM_LPAE_PTE_SH_OS;
@@ -1021,6 +1027,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
                                          ARM_MALI_LPAE_TTBR_READ_INNER |
                                          ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+       if (cfg->coherent_walk)
+               cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
+
        return &data->iop;
 
 out_free_data:
index 2f3a5996d3fc90a62e01f1e6c62876ace0c75ddb..a7f61ba854405f63fa0a4842a5589d9dab076a68 100644 (file)
@@ -76,9 +76,13 @@ static void *vb2_dc_cookie(void *buf_priv)
 static void *vb2_dc_vaddr(void *buf_priv)
 {
        struct vb2_dc_buf *buf = buf_priv;
+       struct dma_buf_map map;
+       int ret;
 
-       if (!buf->vaddr && buf->db_attach)
-               buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+       if (!buf->vaddr && buf->db_attach) {
+               ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+               buf->vaddr = ret ? NULL : map.vaddr;
+       }
 
        return buf->vaddr;
 }
@@ -344,11 +348,13 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
        return 0;
 }
 
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
        struct vb2_dc_buf *buf = dbuf->priv;
 
-       return buf->vaddr;
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+
+       return 0;
 }
 
 static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -619,6 +625,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
 {
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
 
        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
@@ -631,7 +638,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
        }
 
        if (buf->vaddr) {
-               dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+               dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
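
This and the following videobuf2/fastrpc hunks all track the same dma-buf interface change: dma_buf_vmap() now returns an int and fills a struct dma_buf_map, and dma_buf_vunmap() takes the same struct. The calling convention on its own (sketch, hypothetical example_* helpers):

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>

static void *example_vmap(struct dma_buf *dbuf)
{
	struct dma_buf_map map;

	if (dma_buf_vmap(dbuf, &map))
		return NULL;
	return map.vaddr;	/* assumes a system-memory (non-I/O) mapping */
}

static void example_vunmap(struct dma_buf *dbuf, void *vaddr)
{
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dbuf, &map);
}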
index 748131151c49778b165440f568a0087b2ebf080c..030e482186874108701c1ba83806c7a1f6c19126 100644 (file)
@@ -295,14 +295,18 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 static void *vb2_dma_sg_vaddr(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
+       struct dma_buf_map map;
+       int ret;
 
        BUG_ON(!buf);
 
        if (!buf->vaddr) {
-               if (buf->db_attach)
-                       buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
-               else
+               if (buf->db_attach) {
+                       ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+                       buf->vaddr = ret ? NULL : map.vaddr;
+               } else {
                        buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+               }
        }
 
        /* add offset in case userptr is not page-aligned */
@@ -480,11 +484,13 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
        return 0;
 }
 
-static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
        struct vb2_dma_sg_buf *buf = dbuf->priv;
 
-       return vb2_dma_sg_vaddr(buf);
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+
+       return 0;
 }
 
 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -565,6 +571,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
 {
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
 
        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
@@ -577,7 +584,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
        }
 
        if (buf->vaddr) {
-               dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+               dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
index bf5ac63a5742b7437b53855a75eaf669db4239bb..83f95258ec8c68e4443d898a356786bd8a4f7e50 100644 (file)
@@ -314,11 +314,13 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
        vb2_vmalloc_put(dbuf->priv);
 }
 
-static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
        struct vb2_vmalloc_buf *buf = dbuf->priv;
 
-       return buf->vaddr;
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+
+       return 0;
 }
 
 static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -370,26 +372,33 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
 static int vb2_vmalloc_map_dmabuf(void *mem_priv)
 {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map;
+       int ret;
 
-       buf->vaddr = dma_buf_vmap(buf->dbuf);
+       ret = dma_buf_vmap(buf->dbuf, &map);
+       if (ret)
+               return -EFAULT;
+       buf->vaddr = map.vaddr;
 
-       return buf->vaddr ? 0 : -EFAULT;
+       return 0;
 }
 
 static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
 {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
 
-       dma_buf_vunmap(buf->dbuf, buf->vaddr);
+       dma_buf_vunmap(buf->dbuf, &map);
        buf->vaddr = NULL;
 }
 
 static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
 {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
 
        if (buf->vaddr)
-               dma_buf_vunmap(buf->dbuf, buf->vaddr);
+               dma_buf_vunmap(buf->dbuf, &map);
 
        kfree(buf);
 }
index 994ab67bc2dce6cbb4d2216262a1825a1f7df350..70eb5ed942d036c71006a2187fdff08152eb9623 100644 (file)
@@ -586,11 +586,13 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
        kfree(a);
 }
 
-static void *fastrpc_vmap(struct dma_buf *dmabuf)
+static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        struct fastrpc_buf *buf = dmabuf->priv;
 
-       return buf->virt;
+       dma_buf_map_set_vaddr(map, buf->virt);
+
+       return 0;
 }
 
 static int fastrpc_mmap(struct dma_buf *dmabuf,
index 6a26a364f9bd72899af481180e1cd90f6306ef19..d1bb5915082b7c00f950a501c19b6391bc913acd 100644 (file)
@@ -502,7 +502,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
        if (!fbfont)
                return NULL;
 
-       pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+       pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
                        fbfont->width, fbfont->height, fbfont->name);
                        
        bpc = ((fbfont->width+7)/8) * fbfont->height; 
index f253daa05d9d3872777077692c51e0aa2d9fbcdb..e3812a8ff55a4fb8546a1683888aa7def9e51b63 100644 (file)
@@ -240,14 +240,6 @@ static int *MV300_reg = MV300_reg_8bit;
 
 static int inverse;
 
-extern int fontheight_8x8;
-extern int fontwidth_8x8;
-extern unsigned char fontdata_8x8[];
-
-extern int fontheight_8x16;
-extern int fontwidth_8x16;
-extern unsigned char fontdata_8x16[];
-
 /*
  * struct fb_ops {
  *     * open/release and usage marking
index 2fe69015042064277cdef8919079abbec318ae22..6851f47613e17e52e950ad0fd551d8d8b540a5b8 100644 (file)
@@ -2200,7 +2200,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct fb_info *info = dev_get_drvdata(dev);
         struct radeonfb_info *rinfo = info->par;
 
@@ -2212,7 +2212,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct fb_info *info = dev_get_drvdata(dev);
         struct radeonfb_info *rinfo = info->par;
 
index 15a9ee7cd734d3fa1147a9cacbe65cb06bf5b3e4..e9027172c0f552163c30c19d68f7233407766adf 100644 (file)
@@ -531,7 +531,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
 {
        int yres;
        /* memory size in pixels */
-       unsigned pixels = info->screen_size * 8 / var->bits_per_pixel;
+       unsigned int pixels;
        struct cirrusfb_info *cinfo = info->par;
 
        switch (var->bits_per_pixel) {
@@ -573,6 +573,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
        }
 
+       pixels = info->screen_size * 8 / var->bits_per_pixel;
        if (var->xres_virtual < var->xres)
                var->xres_virtual = var->xres;
        /* use highest possible virtual resolution */
index 8268bbee8cae1132ac26470c156c77081eed25e4..9e5c78e00995bf4a4056a64efc17b35e5b921549 100644 (file)
@@ -1215,36 +1215,30 @@ struct fb_cmap32 {
 static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
                          unsigned long arg)
 {
-       struct fb_cmap_user __user *cmap;
-       struct fb_cmap32 __user *cmap32;
-       __u32 data;
-       int err;
-
-       cmap = compat_alloc_user_space(sizeof(*cmap));
-       cmap32 = compat_ptr(arg);
+       struct fb_cmap32 cmap32;
+       struct fb_cmap cmap_from;
+       struct fb_cmap_user cmap;
 
-       if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+       if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32)))
                return -EFAULT;
 
-       if (get_user(data, &cmap32->red) ||
-           put_user(compat_ptr(data), &cmap->red) ||
-           get_user(data, &cmap32->green) ||
-           put_user(compat_ptr(data), &cmap->green) ||
-           get_user(data, &cmap32->blue) ||
-           put_user(compat_ptr(data), &cmap->blue) ||
-           get_user(data, &cmap32->transp) ||
-           put_user(compat_ptr(data), &cmap->transp))
-               return -EFAULT;
+       cmap = (struct fb_cmap_user) {
+               .start  = cmap32.start,
+               .len    = cmap32.len,
+               .red    = compat_ptr(cmap32.red),
+               .green  = compat_ptr(cmap32.green),
+               .blue   = compat_ptr(cmap32.blue),
+               .transp = compat_ptr(cmap32.transp),
+       };
 
-       err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
+       if (cmd == FBIOPUTCMAP)
+               return fb_set_user_cmap(&cmap, info);
 
-       if (!err) {
-               if (copy_in_user(&cmap32->start,
-                                &cmap->start,
-                                2 * sizeof(__u32)))
-                       err = -EFAULT;
-       }
-       return err;
+       lock_fb_info(info);
+       cmap_from = info->cmap;
+       unlock_fb_info(info);
+
+       return fb_cmap_to_user(&cmap_from, &cmap);
 }
 
 static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
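
The rewrite above drops compat_alloc_user_space(): instead of building a 64-bit copy of the ioctl argument in user space, the 32-bit struct is copied into the kernel and its pointer members are widened with compat_ptr(). The shape of the pattern, with a hypothetical struct and helper:

#include <linux/compat.h>
#include <linux/uaccess.h>

struct example32 {
	compat_uptr_t data;	/* 32-bit user pointer */
	__u32 len;
};

static int example_compat_fetch(unsigned long arg,
				void __user **data, u32 *len)
{
	struct example32 v;

	if (copy_from_user(&v, compat_ptr(arg), sizeof(v)))
		return -EFAULT;

	*data = compat_ptr(v.data);	/* widen to a native user pointer */
	*len = v.len;
	return 0;
}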
index a547c21c7e9280b238033634e42094c7f0a47d6c..e332017c6af6210881a30216f1c3a02fef216b9f 100644 (file)
@@ -1425,7 +1425,6 @@ static int fsl_diu_open(struct fb_info *info, int user)
 static int fsl_diu_release(struct fb_info *info, int user)
 {
        struct mfb_info *mfbi = info->par;
-       int res = 0;
 
        spin_lock(&diu_lock);
        mfbi->count--;
@@ -1447,7 +1446,7 @@ static int fsl_diu_release(struct fb_info *info, int user)
        }
 
        spin_unlock(&diu_lock);
-       return res;
+       return 0;
 }
 
 static const struct fb_ops fsl_diu_ops = {
index 570439b326552a674e89a59cce9abce202f7856a..a3853421b263ba28b9eb82d25a23df069559e389 100644 (file)
@@ -1970,9 +1970,7 @@ int matroxfb_register_driver(struct matroxfb_driver* drv) {
        struct matrox_fb_info* minfo;
 
        list_add(&drv->node, &matroxfb_driver_list);
-       for (minfo = matroxfb_l(matroxfb_list.next);
-            minfo != matroxfb_l(&matroxfb_list);
-            minfo = matroxfb_l(minfo->next_fb.next)) {
+       list_for_each_entry(minfo, &matroxfb_list, next_fb) {
                void* p;
 
                if (minfo->drivers_count == MATROXFB_MAX_FB_DRIVERS)
@@ -1990,9 +1988,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv) {
        struct matrox_fb_info* minfo;
 
        list_del(&drv->node);
-       for (minfo = matroxfb_l(matroxfb_list.next);
-            minfo != matroxfb_l(&matroxfb_list);
-            minfo = matroxfb_l(minfo->next_fb.next)) {
+       list_for_each_entry(minfo, &matroxfb_list, next_fb) {
                int i;
 
                for (i = 0; i < minfo->drivers_count; ) {
index 603731a5a72ed6d2ae216d7424b543701cb3980c..894617ddabcb6b3f8d8f6f210cfa87ea7fc1d265 100644 (file)
@@ -1428,7 +1428,6 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        struct device *dev = mx3fb->dev;
        struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
        const char *name = mx3fb_pdata->name;
-       unsigned int irq;
        struct fb_info *fbi;
        struct mx3fb_info *mx3fbi;
        const struct fb_videomode *mode;
@@ -1441,7 +1440,6 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        }
 
        ichan->client = mx3fb;
-       irq = ichan->eof_irq;
 
        if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
                return -EINVAL;
index 5f3e5179c25a91809607dd218c383a58e52ff273..d20b8779bb32c01837eb275426c91be48d4eeac4 100644 (file)
@@ -42,8 +42,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
                const char *pname;
                int len;
 
-               for (dp = NULL;
-                    (dp = of_get_next_child(parent, dp)) != NULL;) {
+               for_each_child_of_node(parent, dp) {
                        pname = of_get_property(dp, "name", NULL);
                        if (!pname)
                                continue;
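
for_each_child_of_node() also takes care of the of_node_get()/of_node_put() handling across iterations that the open-coded of_get_next_child() loop did by hand; only a node kept past an early exit still needs a put from the caller. Sketch (hypothetical helper):

#include <linux/of.h>

static struct device_node *example_find_child(struct device_node *parent,
					      const char *name)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_node_name_eq(child, name))
			return child;	/* still referenced; caller must of_node_put() */
	}

	return NULL;
}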
index 63bd13ba429e4a4d2fd509124ae505ba7522f9e0..a9fd732f81030056607ccb468f41f1241e300384 100644 (file)
@@ -47,18 +47,13 @@ static int tvc_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        dev_dbg(ddata->dev, "connect\n");
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.atv->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.atv->connect(in, dssdev);
 }
 
 static void tvc_disconnect(struct omap_dss_device *dssdev)
index b4a1aefff7661da1650c9ba230c4dcb5dd96dc89..2fa436475b406d7ae1b4adbb0b86d1a4079b7632 100644 (file)
@@ -51,16 +51,11 @@ static int dvic_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.dvi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.dvi->connect(in, dssdev);
 }
 
 static void dvic_disconnect(struct omap_dss_device *dssdev)
index 49551afbdbe0c21672c5416b21e2898a71605540..670b9c6eb5a9c13294fc29fbb5063378414bf871 100644 (file)
@@ -50,18 +50,13 @@ static int hdmic_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        dev_dbg(ddata->dev, "connect\n");
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.hdmi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.hdmi->connect(in, dssdev);
 }
 
 static void hdmic_disconnect(struct omap_dss_device *dssdev)
index 37c9f5bfaefe4eff9633cab034c49d758aefd006..ff3d1e8e1e7b5bc12de9c90bbd337972503b8ce3 100644 (file)
@@ -37,16 +37,11 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.dpi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.dpi->connect(in, dssdev);
 }
 
 static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
index a3912fc8031f352420e953e00d152654ae184881..602324c5c9f9c6cd6d601c3fef87e6d957b1a757 100644 (file)
@@ -59,16 +59,11 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.dpi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.dpi->connect(in, dssdev);
 }
 
 static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
index 1293515e4b1692e1917411c5eaf6159581454825..8d8b5ff7d43c8e0bb635132667e8f57f86b2824b 100644 (file)
@@ -506,16 +506,11 @@ static int acx565akm_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.sdi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.sdi->connect(in, dssdev);
 }
 
 static void acx565akm_disconnect(struct omap_dss_device *dssdev)
index bb85b21f072487239f87005191f8c3fa117a38ac..afac1d9445aa27f08d146839cc7ecb60c7d1a871 100644 (file)
@@ -337,16 +337,11 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.dpi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.dpi->connect(in, dssdev);
 }
 
 static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
index 7ca1803bf1614bf52df78e24fe9332d5782c1dd9..726c190862d40ff8fdbf31abe07fafca2a36399f 100644 (file)
@@ -875,15 +875,7 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
 
 int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
 {
-       struct resource *res;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
-       if (!res) {
-               DSSERR("can't get CORE mem resource\n");
-               return -EINVAL;
-       }
-
-       core->base = devm_ioremap_resource(&pdev->dev, res);
+       core->base = devm_platform_ioremap_resource_byname(pdev, "core");
        if (IS_ERR(core->base)) {
                DSSERR("can't ioremap CORE\n");
                return PTR_ERR(core->base);
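
This and the next several omapdrm hunks are the same conversion: devm_platform_ioremap_resource_byname() folds platform_get_resource_byname() and devm_ioremap_resource() into one call that returns an ERR_PTR on any failure, so the separate missing-resource check disappears. Sketch:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program registers through base ... */
	return 0;
}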
index 2f6ff14a48d977ce2a62ef21256874464dd05100..eda29d3032e1e4336647104959aea7853b60a0ea 100644 (file)
@@ -887,15 +887,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 
 int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
 {
-       struct resource *res;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
-       if (!res) {
-               DSSERR("can't get CORE IORESOURCE_MEM HDMI\n");
-               return -EINVAL;
-       }
-
-       core->base = devm_ioremap_resource(&pdev->dev, res);
+       core->base = devm_platform_ioremap_resource_byname(pdev, "core");
        if (IS_ERR(core->base)) {
                DSSERR("can't ioremap HDMI core\n");
                return PTR_ERR(core->base);
index 9c645adba9e294e5db6054b869642f52588139a1..6fbfeb01b3157b35fbfbfced25d69f201d487388 100644 (file)
@@ -207,19 +207,11 @@ static const struct hdmi_phy_features *hdmi_phy_get_features(void)
 
 int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
 {
-       struct resource *res;
-
        phy_feat = hdmi_phy_get_features();
        if (!phy_feat)
                return -ENODEV;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
-       if (!res) {
-               DSSERR("can't get PHY mem resource\n");
-               return -EINVAL;
-       }
-
-       phy->base = devm_ioremap_resource(&pdev->dev, res);
+       phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
        if (IS_ERR(phy->base)) {
                DSSERR("can't ioremap TX PHY\n");
                return PTR_ERR(phy->base);
index 4991be031b0b42be393b7f869913470e174639e7..c5f89129dcddd08d5d3bd3abd4f997b4ea0a5ec6 100644 (file)
@@ -100,15 +100,10 @@ static int hdmi_pll_enable(struct dss_pll *dsspll)
 {
        struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
        struct hdmi_wp_data *wp = pll->wp;
-       u16 r = 0;
 
        dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
 
-       r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
-       if (r)
-               return r;
-
-       return 0;
+       return hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
 }
 
 static void hdmi_pll_disable(struct dss_pll *dsspll)
@@ -220,17 +215,10 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
        struct hdmi_wp_data *wp)
 {
        int r;
-       struct resource *res;
 
        pll->wp = wp;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
-       if (!res) {
-               DSSERR("can't get PLL mem resource\n");
-               return -EINVAL;
-       }
-
-       pll->base = devm_ioremap_resource(&pdev->dev, res);
+       pll->base = devm_platform_ioremap_resource_byname(pdev, "pll");
        if (IS_ERR(pll->base)) {
                DSSERR("can't ioremap PLLCTRL\n");
                return PTR_ERR(pll->base);
index f560fa4d7786eb9219eec21851dad9508af6d4f8..905d642ff9ed70afaa08b4e0dc0d599e00b4c120 100644 (file)
@@ -890,8 +890,7 @@ static int venc_remove(struct platform_device *pdev)
 
 static int venc_runtime_suspend(struct device *dev)
 {
-       if (venc.tv_dac_clk)
-               clk_disable_unprepare(venc.tv_dac_clk);
+       clk_disable_unprepare(venc.tv_dac_clk);
 
        dispc_runtime_put();
 
@@ -906,8 +905,7 @@ static int venc_runtime_resume(struct device *dev)
        if (r < 0)
                return r;
 
-       if (venc.tv_dac_clk)
-               clk_prepare_enable(venc.tv_dac_clk);
+       clk_prepare_enable(venc.tv_dac_clk);
 
        return 0;
 }
index f45fe60b9e7d414110d0cf962e3cae5d52a4a01a..ca430ca69ba3e0d0535ee125587ed70b6b1df3a8 100644 (file)
@@ -129,7 +129,6 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
        const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
        const char * const clkin_name[] = { "video1_clk", "video2_clk" };
 
-       struct resource *res;
        struct dss_video_pll *vpll;
        void __iomem *pll_base, *clkctrl_base;
        struct clk *clk;
@@ -138,14 +137,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
 
        /* PLL CONTROL */
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "missing platform resource data for pll%d\n", id);
-               return ERR_PTR(-ENODEV);
-       }
-
-       pll_base = devm_ioremap_resource(&pdev->dev, res);
+       pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]);
        if (IS_ERR(pll_base)) {
                dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id);
                return ERR_CAST(pll_base);
@@ -153,15 +145,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
 
        /* CLOCK CONTROL */
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-               clkctrl_name[id]);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "missing platform resource data for pll%d\n", id);
-               return ERR_PTR(-ENODEV);
-       }
-
-       clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
+       clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]);
        if (IS_ERR(clkctrl_base)) {
                dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id);
                return ERR_CAST(clkctrl_base);
index 01a7110e61a76a19167fae82d71e1f9efdb312e3..7f79db827b07b2e4669b3feff6df13deee3db4de 100644 (file)
@@ -192,54 +192,6 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 EXPORT_SYMBOL(sbusfb_ioctl_helper);
 
 #ifdef CONFIG_COMPAT
-static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
-{
-       struct fbcmap32 __user *argp = (void __user *)arg;
-       struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
-       u32 addr;
-       int ret;
-
-       ret = copy_in_user(p, argp, 2 * sizeof(int));
-       ret |= get_user(addr, &argp->red);
-       ret |= put_user(compat_ptr(addr), &p->red);
-       ret |= get_user(addr, &argp->green);
-       ret |= put_user(compat_ptr(addr), &p->green);
-       ret |= get_user(addr, &argp->blue);
-       ret |= put_user(compat_ptr(addr), &p->blue);
-       if (ret)
-               return -EFAULT;
-       return info->fbops->fb_ioctl(info,
-                       (cmd == FBIOPUTCMAP32) ?
-                       FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC,
-                       (unsigned long)p);
-}
-
-static int fbiogscursor(struct fb_info *info, unsigned long arg)
-{
-       struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
-       struct fbcursor32 __user *argp =  (void __user *)arg;
-       compat_uptr_t addr;
-       int ret;
-
-       ret = copy_in_user(p, argp,
-                             2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
-       ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
-       ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
-       ret |= get_user(addr, &argp->cmap.red);
-       ret |= put_user(compat_ptr(addr), &p->cmap.red);
-       ret |= get_user(addr, &argp->cmap.green);
-       ret |= put_user(compat_ptr(addr), &p->cmap.green);
-       ret |= get_user(addr, &argp->cmap.blue);
-       ret |= put_user(compat_ptr(addr), &p->cmap.blue);
-       ret |= get_user(addr, &argp->mask);
-       ret |= put_user(compat_ptr(addr), &p->mask);
-       ret |= get_user(addr, &argp->image);
-       ret |= put_user(compat_ptr(addr), &p->image);
-       if (ret)
-               return -EFAULT;
-       return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
-}
-
 int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
@@ -248,6 +200,7 @@ int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
        case FBIOGATTR:
        case FBIOSVIDEO:
        case FBIOGVIDEO:
+       case FBIOSCURSOR32:
        case FBIOGCURSOR32:     /* This is not implemented yet.
                                   Later it should be converted... */
        case FBIOSCURPOS:
@@ -255,11 +208,76 @@ int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
        case FBIOGCURMAX:
                return info->fbops->fb_ioctl(info, cmd, arg);
        case FBIOPUTCMAP32:
-               return fbiogetputcmap(info, cmd, arg);
-       case FBIOGETCMAP32:
-               return fbiogetputcmap(info, cmd, arg);
-       case FBIOSCURSOR32:
-               return fbiogscursor(info, arg);
+       case FBIOPUTCMAP_SPARC: {
+               struct fbcmap32 c;
+               struct fb_cmap cmap;
+               u16 red, green, blue;
+               u8 red8, green8, blue8;
+               unsigned char __user *ured;
+               unsigned char __user *ugreen;
+               unsigned char __user *ublue;
+               unsigned int i;
+
+               if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+                       return -EFAULT;
+               ured = compat_ptr(c.red);
+               ugreen = compat_ptr(c.green);
+               ublue = compat_ptr(c.blue);
+
+               cmap.len = 1;
+               cmap.red = &red;
+               cmap.green = &green;
+               cmap.blue = &blue;
+               cmap.transp = NULL;
+               for (i = 0; i < c.count; i++) {
+                       int err;
+
+                       if (get_user(red8, &ured[i]) ||
+                           get_user(green8, &ugreen[i]) ||
+                           get_user(blue8, &ublue[i]))
+                               return -EFAULT;
+
+                       red = red8 << 8;
+                       green = green8 << 8;
+                       blue = blue8 << 8;
+
+                       cmap.start = c.index + i;
+                       err = fb_set_cmap(&cmap, info);
+                       if (err)
+                               return err;
+               }
+               return 0;
+       }
+       case FBIOGETCMAP32: {
+               struct fbcmap32 c;
+               unsigned char __user *ured;
+               unsigned char __user *ugreen;
+               unsigned char __user *ublue;
+               struct fb_cmap *cmap = &info->cmap;
+               unsigned int index, i;
+               u8 red, green, blue;
+
+               if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+                       return -EFAULT;
+               index = c.index;
+               ured = compat_ptr(c.red);
+               ugreen = compat_ptr(c.green);
+               ublue = compat_ptr(c.blue);
+
+               if (index > cmap->len || c.count > cmap->len - index)
+                       return -EINVAL;
+
+               for (i = 0; i < c.count; i++) {
+                       red = cmap->red[index + i] >> 8;
+                       green = cmap->green[index + i] >> 8;
+                       blue = cmap->blue[index + i] >> 8;
+                       if (put_user(red, &ured[i]) ||
+                           put_user(green, &ugreen[i]) ||
+                           put_user(blue, &ublue[i]))
+                               return -EFAULT;
+               }
+               return 0;
+       }
        default:
                return -ENOIOCTLCMD;
        }
index c1043420dbd3e0e3521f13e32842be4f9765c7bd..aa4ebe3192ec973512279389911c550d4a648296 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/fbcon.h>
-#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioctl.h>
@@ -341,8 +340,7 @@ static void lcdc_wait_bit(struct sh_mobile_lcdc_priv *priv,
 static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
 {
        if (atomic_inc_and_test(&priv->hw_usecnt)) {
-               if (priv->dot_clk)
-                       clk_prepare_enable(priv->dot_clk);
+               clk_prepare_enable(priv->dot_clk);
                pm_runtime_get_sync(priv->dev);
        }
 }
@@ -351,8 +349,7 @@ static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
 {
        if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
                pm_runtime_put(priv->dev);
-               if (priv->dot_clk)
-                       clk_disable_unprepare(priv->dot_clk);
+               clk_disable_unprepare(priv->dot_clk);
        }
 }
 
index e4b4a2626da464f894bdd790c8cbcb1d13891609..26b19f721ae419836a220e1de0f650024315c3ac 100644 (file)
@@ -1061,8 +1061,6 @@ static const unsigned char SiS300_CHTVVCLKUNTSC[]  = { 0x29,0x29,0x29,0x29,0x2a,
 
 static const unsigned char SiS300_CHTVVCLKONTSC[]  = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
 
-static const unsigned char SiS300_CHTVVCLKSONTSC[] = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
-
 static const unsigned char SiS300_CHTVVCLKUPAL[]   = { 0x2f,0x2f,0x2f,0x2f,0x2f,0x31 };
 
 static const unsigned char SiS300_CHTVVCLKOPAL[]   = { 0x2f,0x2f,0x2f,0x2f,0x30,0x32 };
index c3dfd2a20cf9ff0bbb9bb2e8be9ba799826d85bd..98d20965866235a93e5ce48e3539c3535dd67991 100644 (file)
 
 #define SiS300Idle \
   { \
-       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
-       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
-       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
+       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+       while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
        CmdQueLen = MMIO_IN16(ivideo->mmio_vbase, 0x8240); \
   }
 /* (do three times, because 2D engine seems quite unsure about whether or not it's idle) */
 
 #define SiS310Idle \
   { \
-       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
-       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
-       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
-       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
+       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+       while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
        CmdQueLen = 0; \
   }
 
index d07c851d255b50f8a13b47ad0299fda47fe1a012..413fd0ca56a8ff16b3c45e5868facd25232951ae 100644 (file)
@@ -308,7 +308,6 @@ struct __drm_private_objs_state {
  * struct drm_atomic_state - the global state object for atomic updates
  * @ref: count of all references to this state (will not be freed until zero)
  * @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
  * @async_update: hint for asynchronous plane update
  * @planes: pointer to array of structures with per-plane data
  * @crtcs: pointer to array of CRTC pointers
@@ -336,6 +335,17 @@ struct drm_atomic_state {
         * drm_atomic_crtc_needs_modeset().
         */
        bool allow_modeset : 1;
+       /**
+        * @legacy_cursor_update:
+        *
+        * Hint to enforce legacy cursor IOCTL semantics.
+        *
+        * WARNING: This is thoroughly broken and pretty much impossible to
+        * implement correctly. Drivers must ignore this and should instead
+        * implement &drm_plane_helper_funcs.atomic_async_check and
+        * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this
+        * &drm_plane_helper_funcs.atomic_async_update hooks. New users of this
+        */
        bool legacy_cursor_update : 1;
        bool async_update : 1;
        /**
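
A sketch of the async-update hooks the new comment points drivers to (hypothetical example_* names; hook signatures as in drm_modeset_helper_vtables.h of this era):

#include <drm/drm_modeset_helper_vtables.h>

static int example_async_check(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	/* Reject anything the hardware cannot flip outside vblank. */
	return 0;
}

static void example_async_update(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	/* Apply the new cursor FB/position directly to the hardware. */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.atomic_async_check  = example_async_check,
	.atomic_async_update = example_async_update,
};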
index a53243abd9450508bb302d59122607dede8eb049..1f0ee7f3f473c63cedbfeffb2e1e78135d344b3a 100644 (file)
@@ -106,8 +106,9 @@ struct drm_device;
 #define DP_AUX_I2C_REPLY_DEFER         (0x2 << 2)
 #define DP_AUX_I2C_REPLY_MASK          (0x3 << 2)
 
-/* AUX CH addresses */
-/* DPCD */
+/* DPCD Field Address Mapping */
+
+/* Receiver Capability */
 #define DP_DPCD_REV                         0x000
 # define DP_DPCD_REV_10                     0x10
 # define DP_DPCD_REV_11                     0x11
@@ -124,6 +125,7 @@ struct drm_device;
 
 #define DP_MAX_DOWNSPREAD                   0x003
 # define DP_MAX_DOWNSPREAD_0_5             (1 << 0)
+# define DP_STREAM_REGENERATION_STATUS_CAP  (1 << 1) /* 2.0 */
 # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
 # define DP_TPS4_SUPPORTED                  (1 << 7)
 
@@ -141,6 +143,7 @@ struct drm_device;
 
 #define DP_MAIN_LINK_CHANNEL_CODING         0x006
 # define DP_CAP_ANSI_8B10B                 (1 << 0)
+# define DP_CAP_ANSI_128B132B               (1 << 1) /* 2.0 */
 
 #define DP_DOWN_STREAM_PORT_COUNT          0x007
 # define DP_PORT_COUNT_MASK                0x0f
@@ -184,8 +187,14 @@ struct drm_device;
 #define DP_FAUX_CAP                        0x020   /* 1.2 */
 # define DP_FAUX_CAP_1                     (1 << 0)
 
+#define DP_SINK_VIDEO_FALLBACK_FORMATS      0x020   /* 2.0 */
+# define DP_FALLBACK_1024x768_60HZ_24BPP    (1 << 0)
+# define DP_FALLBACK_1280x720_60HZ_24BPP    (1 << 1)
+# define DP_FALLBACK_1920x1080_60HZ_24BPP   (1 << 2)
+
 #define DP_MSTM_CAP                        0x021   /* 1.2 */
 # define DP_MST_CAP                        (1 << 0)
+# define DP_SINGLE_STREAM_SIDEBAND_MSG      (1 << 1) /* 2.0 */
 
 #define DP_NUMBER_OF_AUDIO_ENDPOINTS       0x022   /* 1.2 */
 
@@ -426,13 +435,16 @@ struct drm_device;
 #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1  0x0a1
 #define DP_DSC_BRANCH_MAX_LINE_WIDTH        0x0a2
 
-/* link configuration */
+/* Link Configuration */
 #define        DP_LINK_BW_SET                      0x100
 # define DP_LINK_RATE_TABLE                0x00    /* eDP 1.4 */
 # define DP_LINK_BW_1_62                   0x06
 # define DP_LINK_BW_2_7                            0x0a
 # define DP_LINK_BW_5_4                            0x14    /* 1.2 */
 # define DP_LINK_BW_8_1                            0x1e    /* 1.4 */
+# define DP_LINK_BW_10                      0x01    /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_13_5                    0x04    /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_20                      0x02    /* 2.0 128b/132b Link Layer */
 
 #define DP_LANE_COUNT_SET                  0x101
 # define DP_LANE_COUNT_MASK                0x0f
@@ -484,12 +496,15 @@ struct drm_device;
 # define DP_TRAIN_PRE_EMPHASIS_SHIFT       3
 # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
 
+# define DP_TX_FFE_PRESET_VALUE_MASK        (0xf << 0) /* 2.0 128b/132b Link Layer */
+
 #define DP_DOWNSPREAD_CTRL                 0x107
 # define DP_SPREAD_AMP_0_5                 (1 << 4)
 # define DP_MSA_TIMING_PAR_IGNORE_EN       (1 << 7) /* eDP */
 
 #define DP_MAIN_LINK_CHANNEL_CODING_SET            0x108
 # define DP_SET_ANSI_8B10B                 (1 << 0)
+# define DP_SET_ANSI_128B132B               (1 << 1)
 
 #define DP_I2C_SPEED_CONTROL_STATUS        0x109   /* DPI */
 /* bitmask as for DP_I2C_SPEED_CAP */
@@ -508,8 +523,19 @@ struct drm_device;
 # define DP_LINK_QUAL_PATTERN_ERROR_RATE    2
 # define DP_LINK_QUAL_PATTERN_PRBS7        3
 # define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM  4
-# define DP_LINK_QUAL_PATTERN_HBR2_EYE      5
-# define DP_LINK_QUAL_PATTERN_MASK         7
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1  5
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2  6
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3  7
+/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+# define DP_LINK_QUAL_PATTERN_PRSBS9        0x18
+# define DP_LINK_QUAL_PATTERN_PRSBS11       0x20
+# define DP_LINK_QUAL_PATTERN_PRSBS15       0x28
+# define DP_LINK_QUAL_PATTERN_PRSBS23       0x30
+# define DP_LINK_QUAL_PATTERN_PRSBS31       0x38
+# define DP_LINK_QUAL_PATTERN_CUSTOM        0x40
+# define DP_LINK_QUAL_PATTERN_SQUARE        0x48
 
 #define DP_TRAINING_LANE0_1_SET2           0x10f
 #define DP_TRAINING_LANE2_3_SET2           0x110
@@ -580,6 +606,7 @@ struct drm_device;
 #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
 #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
 
+/* Link/Sink Device Status */
 #define DP_SINK_COUNT                      0x200
 /* prior to 1.2 bit 7 was reserved mbz */
 # define DP_GET_SINK_COUNT(x)              ((((x) & 0x80) >> 1) | ((x) & 0x3f))
@@ -611,9 +638,9 @@ struct drm_device;
 #define DP_LINK_STATUS_UPDATED             (1 << 7)
 
 #define DP_SINK_STATUS                     0x205
-
-#define DP_RECEIVE_PORT_0_STATUS           (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS           (1 << 1)
+# define DP_RECEIVE_PORT_0_STATUS          (1 << 0)
+# define DP_RECEIVE_PORT_1_STATUS          (1 << 1)
+# define DP_STREAM_REGENERATION_STATUS      (1 << 2) /* 2.0 */
 
 #define DP_ADJUST_REQUEST_LANE0_1          0x206
 #define DP_ADJUST_REQUEST_LANE2_3          0x207
@@ -626,6 +653,12 @@ struct drm_device;
 # define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
 # define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
 
+/* DP 2.0 128b/132b Link Layer */
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK  (0xf << 0)
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK  (0xf << 4)
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
+
 #define DP_ADJUST_REQUEST_POST_CURSOR2      0x20c
 # define DP_ADJUST_POST_CURSOR2_LANE0_MASK  0x03
 # define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
@@ -779,20 +812,27 @@ struct drm_device;
 #define DP_VC_PAYLOAD_ID_SLOT_1             0x2c1   /* 1.2 MST */
 /* up to ID_SLOT_63 at 0x2ff */
 
+/* Source Device-specific */
 #define DP_SOURCE_OUI                      0x300
+
+/* Sink Device-specific */
 #define DP_SINK_OUI                        0x400
+
+/* Branch Device-specific */
 #define DP_BRANCH_OUI                      0x500
 #define DP_BRANCH_ID                        0x503
 #define DP_BRANCH_REVISION_START            0x509
 #define DP_BRANCH_HW_REV                    0x509
 #define DP_BRANCH_SW_REV                    0x50A
 
+/* Link/Sink Device Power Control */
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
 # define DP_SET_POWER_D3                    0x2
 # define DP_SET_POWER_MASK                  0x3
 # define DP_SET_POWER_D3_AUX_ON             0x5
 
+/* eDP-specific */
 #define DP_EDP_DPCD_REV                            0x700    /* eDP 1.2 */
 # define DP_EDP_11                         0x00
 # define DP_EDP_12                         0x01
@@ -876,11 +916,13 @@ struct drm_device;
 #define DP_EDP_REGIONAL_BACKLIGHT_BASE      0x740    /* eDP 1.4 */
 #define DP_EDP_REGIONAL_BACKLIGHT_0        0x741    /* eDP 1.4 */
 
+/* Sideband MSG Buffers */
 #define DP_SIDEBAND_MSG_DOWN_REQ_BASE      0x1000   /* 1.2 MST */
 #define DP_SIDEBAND_MSG_UP_REP_BASE        0x1200   /* 1.2 MST */
 #define DP_SIDEBAND_MSG_DOWN_REP_BASE      0x1400   /* 1.2 MST */
 #define DP_SIDEBAND_MSG_UP_REQ_BASE        0x1600   /* 1.2 MST */
 
+/* DPRX Event Status Indicator */
 #define DP_SINK_COUNT_ESI                  0x2002   /* 1.2 */
 /* 0-5 sink count */
 # define DP_SINK_COUNT_CP_READY             (1 << 6)
@@ -934,8 +976,8 @@ struct drm_device;
 #define DP_LANE_ALIGN_STATUS_UPDATED_ESI       0x200e /* status same as 0x204 */
 #define DP_SINK_STATUS_ESI                     0x200f /* status same as 0x205 */
 
+/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
 #define DP_DP13_DPCD_REV                    0x2200
-#define DP_DP13_MAX_LINK_RATE               0x2201
 
 #define DP_DPRX_FEATURE_ENUMERATION_LIST    0x2210  /* DP 1.3 */
 # define DP_GTC_CAP                                    (1 << 0)  /* DP 1.3 */
@@ -947,6 +989,15 @@ struct drm_device;
 # define DP_VSC_EXT_CEA_SDP_SUPPORTED                  (1 << 6)  /* DP 1.4 */
 # define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED         (1 << 7)  /* DP 1.4 */
 
+#define DP_128B132B_SUPPORTED_LINK_RATES       0x2215 /* 2.0 */
+# define DP_UHBR10                             (1 << 0)
+# define DP_UHBR20                             (1 << 1)
+# define DP_UHBR13_5                           (1 << 2)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL   0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+
+/* Protocol Converter Extension */
 /* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
 #define DP_CEC_TUNNELING_CAPABILITY            0x3000
 # define DP_CEC_TUNNELING_CAPABLE               (1 << 0)
@@ -1013,6 +1064,7 @@ struct drm_device;
 #define DP_PROTOCOL_CONVERTER_CONTROL_2                0x3052 /* DP 1.3 */
 # define DP_CONVERSION_TO_YCBCR422_ENABLE      (1 << 0) /* DP 1.3 */
 
+/* HDCP 1.3 and HDCP 2.2 */
 #define DP_AUX_HDCP_BKSV               0x68000
 #define DP_AUX_HDCP_RI_PRIME           0x68005
 #define DP_AUX_HDCP_AKSV               0x68007
@@ -1058,7 +1110,7 @@ struct drm_device;
 #define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET     0x69494
 #define DP_HDCP_2_2_REG_DBG_OFFSET             0x69518
 
-/* Link Training (LT)-tunable PHY Repeaters */
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
 #define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
 #define DP_MAX_LINK_RATE_PHY_REPEATER                      0xf0001 /* 1.4a */
 #define DP_PHY_REPEATER_CNT                                0xf0002 /* 1.3 */
index e57d0440f00f676cec180c2eb06d5c9ad70b008f..023076255a7fd56ae6ba6e7e3fc8ccc4af4b9587 100644 (file)
@@ -36,10 +36,12 @@ struct drm_file;
 struct drm_gem_object;
 struct drm_master;
 struct drm_minor;
+struct dma_buf;
 struct dma_buf_attachment;
 struct drm_display_mode;
 struct drm_mode_create_dumb;
 struct drm_printer;
+struct sg_table;
 
 /**
  * enum drm_driver_feature - feature flags
@@ -326,32 +328,6 @@ struct drm_driver {
         */
        void (*debugfs_init)(struct drm_minor *minor);
 
-       /**
-        * @gem_free_object_unlocked: deconstructor for drm_gem_objects
-        *
-        * This is deprecated and should not be used by new drivers. Use
-        * &drm_gem_object_funcs.free instead.
-        */
-       void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
-       /**
-        * @gem_open_object:
-        *
-        * This callback is deprecated in favour of &drm_gem_object_funcs.open.
-        *
-        * Driver hook called upon gem handle creation
-        */
-       int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
-
-       /**
-        * @gem_close_object:
-        *
-        * This callback is deprecated in favour of &drm_gem_object_funcs.close.
-        *
-        * Driver hook called upon gem handle release
-        */
-       void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
        /**
         * @gem_create_object: constructor for gem objects
         *
@@ -360,6 +336,7 @@ struct drm_driver {
         */
        struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
                                                    size_t size);
+
        /**
         * @prime_handle_to_fd:
         *
@@ -382,14 +359,7 @@ struct drm_driver {
         */
        int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
                                int prime_fd, uint32_t *handle);
-       /**
-        * @gem_prime_export:
-        *
-        * Export hook for GEM drivers. Deprecated in favour of
-        * &drm_gem_object_funcs.export.
-        */
-       struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
-                                            int flags);
+
        /**
         * @gem_prime_import:
         *
@@ -399,29 +369,6 @@ struct drm_driver {
         */
        struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
                                struct dma_buf *dma_buf);
-
-       /**
-        * @gem_prime_pin:
-        *
-        * Deprecated hook in favour of &drm_gem_object_funcs.pin.
-        */
-       int (*gem_prime_pin)(struct drm_gem_object *obj);
-
-       /**
-        * @gem_prime_unpin:
-        *
-        * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
-        */
-       void (*gem_prime_unpin)(struct drm_gem_object *obj);
-
-
-       /**
-        * @gem_prime_get_sg_table:
-        *
-        * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
-        */
-       struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
-
        /**
         * @gem_prime_import_sg_table:
         *
@@ -432,22 +379,6 @@ struct drm_driver {
                                struct drm_device *dev,
                                struct dma_buf_attachment *attach,
                                struct sg_table *sgt);
-       /**
-        * @gem_prime_vmap:
-        *
-        * Deprecated vmap hook for GEM drivers. Please use
-        * &drm_gem_object_funcs.vmap instead.
-        */
-       void *(*gem_prime_vmap)(struct drm_gem_object *obj);
-
-       /**
-        * @gem_prime_vunmap:
-        *
-        * Deprecated vunmap hook for GEM drivers. Please use
-        * &drm_gem_object_funcs.vunmap instead.
-        */
-       void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
-
        /**
         * @gem_prime_mmap:
         *
@@ -522,14 +453,6 @@ struct drm_driver {
                            struct drm_device *dev,
                            uint32_t handle);
 
-       /**
-        * @gem_vm_ops: Driver private ops for this object
-        *
-        * For GEM drivers this is deprecated in favour of
-        * &drm_gem_object_funcs.vm_ops.
-        */
-       const struct vm_operations_struct *gem_vm_ops;
-
        /** @major: driver major number */
        int major;
        /** @minor: driver minor number */
index 337a48321705e0a4eb2c8346340fa8db456286bc..c38dd35da00bfdf1e8adec1aa031d1e5c6af63bc 100644 (file)
@@ -272,7 +272,7 @@ struct drm_gem_object {
         * attachment point for the device. This is invariant over the lifetime
         * of a gem object.
         *
-        * The &drm_driver.gem_free_object_unlocked callback is responsible for
+        * The &drm_gem_object_funcs.free callback is responsible for
         * cleaning up the dma_buf attachment and references acquired at import
         * time.
         *
index 62cc6e6c3a4fdb08eb9fafe274c72660c890f519..128f88174d321c8a0916152d8e870ae675370f8e 100644 (file)
@@ -35,7 +35,6 @@ struct vm_area_struct;
  * @placement: TTM placement information. Supported placements are \
        %TTM_PL_VRAM and %TTM_PL_SYSTEM
  * @placements:        TTM placement information.
- * @pin_count: Pin counter
  *
  * The type struct drm_gem_vram_object represents a GEM object that is
  * backed by VRAM. It can be used for simple framebuffer devices with
@@ -64,8 +63,6 @@ struct drm_gem_vram_object {
        /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
        struct ttm_placement placement;
        struct ttm_place placements[2];
-
-       int pin_count;
 };
 
 /**
index a18f73eb3cf6e252b28a1d15a0cc767f0a346623..5ffbb4ed5b35e74ce977636361eccc10f28e829b 100644 (file)
@@ -58,6 +58,12 @@ struct drm_mode_config_funcs {
         * actual modifier used if the request doesn't have it specified,
         * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
         *
+        * IMPORTANT: These implied modifiers for legacy userspace must be
+        * stored in struct &drm_framebuffer, including all relevant metadata
+        * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the
+        * modifier enables additional planes beyond the fourcc pixel format
+        * code. This is required by the GETFB2 ioctl.
+        *
         * If the parameters are deemed valid and the backing storage objects in
         * the underlying memory manager all exist, then the driver allocates
         * a new &drm_framebuffer structure, subclassed to contain
@@ -915,6 +921,13 @@ struct drm_mode_config {
         * @allow_fb_modifiers:
         *
         * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+        *
+        * IMPORTANT:
+        *
+        * If this is set the driver must fill out the full implicit modifier
+        * information in their &drm_mode_config_funcs.fb_create hook for legacy
+        * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is
+        * broken for modifier-aware userspace.
         */
        bool allow_fb_modifiers;
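A minimal sketch of what this asks of drivers; foo_get_implied_modifier() is
a hypothetical helper that maps the legacy metadata onto a modifier before
handing off to drm_gem_fb_create():

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct drm_mode_fb_cmd2 cmd = *mode_cmd;

		if (!(cmd.flags & DRM_MODE_FB_MODIFIERS)) {
			/* Legacy userspace: record the implied modifier (and
			 * any extra pitches/offsets) so GETFB2 can report it. */
			cmd.modifier[0] = foo_get_implied_modifier(dev, &cmd);
			cmd.flags |= DRM_MODE_FB_MODIFIERS;
		}

		return drm_gem_fb_create(dev, file, &cmd);
	}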
 
index 4efec30f8badc3a9276669819fdfa83a77d1309d..bde42988c4b5b7323a1e7089086c9be27412fcd6 100644 (file)
@@ -417,14 +417,10 @@ struct drm_crtc_helper_funcs {
         * @atomic_enable must be the inverse of @atomic_disable for atomic
         * drivers.
         *
-        * Drivers can use the @old_crtc_state input parameter if the operations
-        * needed to enable the CRTC don't depend solely on the new state but
-        * also on the transition between the old state and the new state.
-        *
         * This function is optional.
         */
        void (*atomic_enable)(struct drm_crtc *crtc,
-                             struct drm_crtc_state *old_crtc_state);
+                             struct drm_atomic_state *state);
 
        /**
         * @atomic_disable:
@@ -441,15 +437,10 @@ struct drm_crtc_helper_funcs {
         * need to implement it if there's no need to disable anything at the
         * CRTC level.
         *
-        * Comparing to @disable, this one provides the additional input
-        * parameter @old_crtc_state which could be used to access the old
-        * state. Atomic drivers should consider to use this one instead
-        * of @disable.
-        *
         * This function is optional.
         */
        void (*atomic_disable)(struct drm_crtc *crtc,
-                              struct drm_crtc_state *old_crtc_state);
+                              struct drm_atomic_state *state);
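Hooks that still care about the transition can look the old CRTC state up
from the full atomic state; a sketch with an illustrative driver function:

	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
	{
		struct drm_crtc_state *old_state =
			drm_atomic_get_old_crtc_state(state, crtc);
		struct drm_crtc_state *new_state =
			drm_atomic_get_new_crtc_state(state, crtc);

		/* Program the hardware from new_state; consult old_state
		 * only when the old->new transition itself matters. */
	}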
 
        /**
         * @get_scanout_position:
index 0f69f9fbf12cd9ff4fa610ad5d046af147268426..0991a47a1567014f3f133e05d0215901055841ca 100644 (file)
@@ -54,6 +54,7 @@ struct device;
 struct dma_buf_export_info;
 struct dma_buf;
 struct dma_buf_attachment;
+struct dma_buf_map;
 
 enum dma_data_direction;
 
@@ -82,8 +83,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           struct sg_table *sgt,
                           enum dma_data_direction dir);
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
 
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
index 0f7cd21d6d7489a0afce9573c9f0679822410a1e..37102e45e496b5387af06a280413de3707fedfbf 100644 (file)
@@ -90,9 +90,6 @@ struct ttm_tt;
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is destroyed or put on the delayed delete list.
  * @mem: structure describing current placement.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object.
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
  * @deleted: True if the object is only a zombie and already deleted.
@@ -139,7 +136,6 @@ struct ttm_buffer_object {
         */
 
        struct ttm_resource mem;
-       struct file *persistent_swap_storage;
        struct ttm_tt *ttm;
        bool deleted;
 
@@ -157,6 +153,7 @@ struct ttm_buffer_object {
 
        struct dma_fence *moving;
        unsigned priority;
+       unsigned pin_count;
 
        /**
         * Special members that are protected by the reserve lock
@@ -261,6 +258,11 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
  */
 int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
 
+static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+       return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+}
+
 /**
  * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
  *
@@ -446,50 +448,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
                struct sg_table *sg, struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *));
 
-/**
- * ttm_bo_create
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
-                 enum ttm_bo_type type, struct ttm_placement *placement,
-                 uint32_t page_alignment, bool interruptible,
-                 struct ttm_buffer_object **p_bo);
-
-/**
- * ttm_bo_evict_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Evicts all buffers on the lru list of the memory type.
- * This is normally part of a VT switch or an
- * out-of-memory-space-due-to-fragmentation handler.
- * The caller must make sure that there are no other processes
- * currently validating buffers, and can do that by taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTARTSYS: The call was interrupted by a signal while waiting to
- * evict a buffer.
- */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
 /**
  * ttm_kmap_obj_virtual
  *
@@ -583,9 +541,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf,
                  size_t count, loff_t *f_pos, bool write);
 
-int ttm_bo_swapout(struct ttm_bo_global *glob,
-                       struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(void);
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -606,6 +562,31 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
        return bo->base.dev != NULL;
 }
 
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted any more during memory pressure.
+ */
+static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+{
+       dma_resv_assert_held(bo->base.resv);
+       ++bo->pin_count;
+}
+
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+       dma_resv_assert_held(bo->base.resv);
+       WARN_ON_ONCE(!bo->pin_count);
+       --bo->pin_count;
+}
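A sketch of the intended calling pattern; both helpers require the
reservation lock, taken here with ttm_bo_reserve():

	static int foo_pin_for_scanout(struct ttm_buffer_object *bo)
	{
		int ret = ttm_bo_reserve(bo, true, false, NULL);

		if (ret)
			return ret;
		ttm_bo_pin(bo);	/* bo stays resident until the matching unpin */
		ttm_bo_unreserve(bo);
		return 0;
	}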
+
 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
index 864afa8f6f18f4eb80057757e49866c4e8150aba..e9f683fa72dce9d27bc91421282c6bb9c751f75d 100644 (file)
@@ -42,6 +42,7 @@
 #include "ttm_module.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
+#include "ttm_pool.h"
 
 /**
  * struct ttm_bo_driver
@@ -90,31 +91,6 @@ struct ttm_bo_driver {
         */
        void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
-       /**
-        * ttm_tt_bind
-        *
-        * @bdev: Pointer to a ttm device
-        * @ttm: Pointer to a struct ttm_tt.
-        * @bo_mem: Pointer to a struct ttm_resource describing the
-        * memory type and location for binding.
-        *
-        * Bind the backend pages into the aperture in the location
-        * indicated by @bo_mem. This function should be able to handle
-        * differences between aperture and system page sizes.
-        */
-       int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
-
-       /**
-        * ttm_tt_unbind
-        *
-        * @bdev: Pointer to a ttm device
-        * @ttm: Pointer to a struct ttm_tt.
-        *
-        * Unbind previously bound backend pages. This function should be
-        * able to handle differences between aperture and system page sizes.
-        */
-       void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
        /**
         * ttm_tt_destroy
         *
@@ -181,18 +157,9 @@ struct ttm_bo_driver {
                             struct file *filp);
 
        /**
-        * Hook to notify driver about a driver move so it
-        * can do tiling things and book-keeping.
-        *
-        * @evict: whether this move is evicting the buffer from the graphics
-        * address space
+        * Hook to notify driver about a resource delete.
         */
-       void (*move_notify)(struct ttm_buffer_object *bo,
-                           bool evict,
-                           struct ttm_resource *new_mem);
-       /* notify the driver we are taking a fault on this BO
-        * and have reserved it */
-       int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+       void (*delete_mem_notify)(struct ttm_buffer_object *bo);
 
        /**
         * notify the driver that we're about to swap out this bo
@@ -329,6 +296,7 @@ struct ttm_bo_device {
         * Protected by internal locks.
         */
        struct drm_vma_offset_manager *vma_manager;
+       struct ttm_pool pool;
 
        /*
         * Protected by the global:lru lock.
@@ -347,8 +315,6 @@ struct ttm_bo_device {
 
        struct delayed_work wq;
 
-       bool need_dma32;
-
        bool no_retry;
 };
 
@@ -429,11 +395,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
  * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
  * @mapping: The address space to use for this bo.
  * @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
+ * @use_dma_alloc: If coherent DMA allocation API should be used.
+ * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
  *
  * Initializes a struct ttm_bo_device:
  * Returns:
@@ -441,9 +407,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  */
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
+                      struct device *dev,
                       struct address_space *mapping,
                       struct drm_vma_offset_manager *vma_manager,
-                      bool need_dma32);
+                      bool use_dma_alloc, bool use_dma32);
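Callers now pass the core device plus the two allocation flags; an updated
driver init might look like this (the surrounding names are illustrative):

	ret = ttm_bo_device_init(&priv->bdev, &foo_bo_driver,
				 dev->dev, /* for DMA mappings */
				 dev->anon_inode->i_mapping,
				 dev->vma_offset_manager,
				 true,	/* use_dma_alloc */
				 false	/* use_dma32 */);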
 
 /**
  * ttm_bo_unmap_virtual
@@ -452,15 +419,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
  */
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_reserve:
  *
@@ -578,32 +536,10 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 /*
  * ttm_bo_util.c
  */
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem);
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_resource *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   struct ttm_operation_ctx *ctx,
-                   struct ttm_resource *new_mem);
 
 /**
  * ttm_bo_move_memcpy
@@ -627,15 +563,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem);
 
-/**
- * ttm_bo_free_old_node
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Utility function to free an old placement after a successful move.
- */
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_move_accel_cleanup.
  *
@@ -669,13 +596,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
 /**
  * ttm_io_prot
  *
- * @c_state: Caching state.
+ * @bo: ttm buffer object
+ * @res: ttm resource object
  * @tmp: Page protection flag for a normal, cached mapping.
  *
  * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @bo and @res.
  */
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+                    pgprot_t tmp);
 
 /**
  * ttm_bo_tt_bind
@@ -684,13 +613,6 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
  */
 int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
 
-/**
- * ttm_bo_tt_bind
- *
- * Unbind the object tt from a memory resource.
- */
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_tt_destroy.
  */
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
new file mode 100644 (file)
index 0000000..a0b4a49
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_CACHING_H_
+#define _TTM_CACHING_H_
+
+#define TTM_NUM_CACHING_TYPES  3
+
+enum ttm_caching {
+       ttm_uncached,
+       ttm_write_combined,
+       ttm_cached
+};
+
+#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
deleted file mode 100644 (file)
index a6b6ef5..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- *          Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
-                               struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
-                       struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
-                                         unsigned max_pages)
-{
-       return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
-       return 0;
-}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
-                               struct device *dev,
-                               struct ttm_operation_ctx *ctx)
-{
-       return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
-                                     struct device *dev)
-{
-}
-#endif
-
-#endif
index d4022655eae40650c2d0b47ac90ef85991fcaa14..aa6ba4d0cf78412877c37acbded36539a471b1aa 100644 (file)
 #define TTM_PL_PRIV             3
 
 /*
- * Other flags that affects data placement.
- * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
- * if available.
- * TTM_PL_FLAG_SHARED means that another application may
- * reference the buffer.
- * TTM_PL_FLAG_NO_EVICT means that the buffer may never
- * be evicted to make room for other buffers.
  * TTM_PL_FLAG_TOPDOWN requests to be placed from the
  * top of the memory area, instead of the bottom.
  */
 
-#define TTM_PL_FLAG_CACHED      (1 << 16)
-#define TTM_PL_FLAG_UNCACHED    (1 << 17)
-#define TTM_PL_FLAG_WC          (1 << 18)
 #define TTM_PL_FLAG_CONTIGUOUS  (1 << 19)
-#define TTM_PL_FLAG_NO_EVICT    (1 << 21)
 #define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
-#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
-                                TTM_PL_FLAG_UNCACHED | \
-                                TTM_PL_FLAG_WC)
-
 /**
  * struct ttm_place
  *
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
new file mode 100644 (file)
index 0000000..4321728
--- /dev/null
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+       struct ttm_pool *pool;
+       unsigned int order;
+       enum ttm_caching caching;
+
+       struct list_head shrinker_list;
+
+       spinlock_t lock;
+       struct list_head pages;
+};
+
+/**
+ * ttm_pool - Pool for all caching and orders
+ *
+ * @dev: the device we allocate pages for
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+       struct device *dev;
+
+       bool use_dma_alloc;
+       bool use_dma32;
+
+       struct {
+               struct ttm_pool_type orders[MAX_ORDER];
+       } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+                  struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+                  bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
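A sketch of the expected lifecycle for a driver-owned pool (error handling
elided; tt and ctx are assumed to exist):

	struct ttm_pool pool;

	ttm_pool_init(&pool, dev, true, false); /* use_dma_alloc, !use_dma32 */

	ret = ttm_pool_alloc(&pool, tt, &ctx);	/* fills tt->pages */
	/* ... use the backing pages ... */
	ttm_pool_free(&pool, tt);		/* returns pages to the pool */

	ttm_pool_fini(&pool);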
index 0e172d94a0c1e60db2797f2fb1562b725a2df897..f48a70d39ac56c1b106654bd28dd8f49deb58dd1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/dma-fence.h>
 #include <drm/drm_print.h>
+#include <drm/ttm/ttm_caching.h>
 
 #define TTM_MAX_BO_PRIORITY    4U
 
@@ -148,9 +149,10 @@ struct ttm_resource_manager {
  * Structure indicating the bus placement of an object.
  */
 struct ttm_bus_placement {
-       void            *addr;
-       phys_addr_t     offset;
-       bool            is_iomem;
+       void                    *addr;
+       phys_addr_t             offset;
+       bool                    is_iomem;
+       enum ttm_caching        caching;
 };
 
 /**
@@ -228,8 +230,8 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
                               unsigned long p_size);
 
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
-                                         struct ttm_resource_manager *man);
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+                                  struct ttm_resource_manager *man);
 
 void ttm_resource_manager_debug(struct ttm_resource_manager *man,
                                struct drm_printer *p);
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
deleted file mode 100644 (file)
index 7c492b4..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2018 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Huang Rui <ray.huang@amd.com>
- */
-
-#ifndef TTM_SET_MEMORY
-#define TTM_SET_MEMORY
-
-#include <linux/mm.h>
-
-#ifdef CONFIG_X86
-
-#include <asm/set_memory.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
-       return set_pages_array_wb(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
-       return set_pages_array_wc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
-       return set_pages_array_uc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
-       return set_pages_wb(page, numpages);
-}
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-       unsigned long addr = (unsigned long)page_address(page);
-
-       return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-       return set_pages_uc(page, numpages);
-}
-
-#else /* for CONFIG_X86 */
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#include <asm/agp.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               unmap_page_from_agp(pages[i]);
-       return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-       return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-       return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
-       int i;
-
-       for (i = 0; i < numpages; i++)
-               unmap_page_from_agp(page++);
-       return 0;
-}
-
-#else /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
-       return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
-       return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
-       return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
-       return 0;
-}
-
-#endif /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-       return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-       return 0;
-}
-
-#endif /* for CONFIG_X86 */
-
-#endif
index 75208c0a0cac0c98c7902c889cad0c7dd3914670..da27e9d8fa64ee509c7a5a208e69a86dbaf51ad0 100644 (file)
 #define _TTM_TT_H_
 
 #include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
 
 struct ttm_tt;
 struct ttm_resource;
 struct ttm_buffer_object;
 struct ttm_operation_ctx;
 
-#define TTM_PAGE_FLAG_WRITE           (1 << 3)
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
-#define TTM_PAGE_FLAG_DMA32           (1 << 7)
 #define TTM_PAGE_FLAG_SG              (1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY       (1 << 9)
 
 #define TTM_PAGE_FLAG_PRIV_POPULATED  (1 << 31)
 
-enum ttm_caching_state {
-       tt_uncached,
-       tt_wc,
-       tt_cached
-};
-
 /**
  * struct ttm_tt
  *
  * @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
  * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
  * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @caching: The current caching state of the pages.
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -68,10 +61,11 @@ enum ttm_caching_state {
 struct ttm_tt {
        struct page **pages;
        uint32_t page_flags;
-       unsigned long num_pages;
-       struct sg_table *sg; /* for SG objects via dma-buf */
+       uint32_t num_pages;
+       struct sg_table *sg;
+       dma_addr_t *dma_address;
        struct file *swap_storage;
-       enum ttm_caching_state caching_state;
+       enum ttm_caching caching;
 };
 
 static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
@@ -79,33 +73,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
        return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
 }
 
-static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
-{
-       tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-static inline void ttm_tt_set_populated(struct ttm_tt *tt)
-{
-       tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
-       struct ttm_tt ttm;
-       dma_addr_t *dma_address;
-       struct list_head pages_list;
-};
-
 /**
  * ttm_tt_create
  *
@@ -123,6 +90,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  * @ttm: The struct ttm_tt.
  * @bo: The buffer object we create the ttm for.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
  *
  * Create a struct ttm_tt to back data with system memory pages.
  * No pages are actually allocated.
@@ -130,11 +98,11 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  * NULL: Out of memory.
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-               uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-                   uint32_t page_flags);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-                  uint32_t page_flags);
+               uint32_t page_flags, enum ttm_caching caching);
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+                   uint32_t page_flags, enum ttm_caching caching);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+                  uint32_t page_flags, enum ttm_caching caching);
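Driver ttm_tt constructors gain the caching argument; a sketch with an
illustrative driver hook that requests cached pages:

	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
						uint32_t page_flags)
	{
		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;
		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
			kfree(tt);
			return NULL;
		}
		return tt;
	}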
 
 /**
  * ttm_tt_fini
@@ -144,7 +112,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
  * Free memory of ttm_tt structure
  */
 void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_destroy:
@@ -170,22 +137,7 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
  * Swap in a previously swap out ttm_tt.
  */
 int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 /**
  * ttm_tt_populate - allocate pages for a ttm
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
new file mode 100644 (file)
index 0000000..fd1aba5
--- /dev/null
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer to dma-buf-mapped memory, plus helpers.
+ */
+
+#ifndef __DMA_BUF_MAP_H__
+#define __DMA_BUF_MAP_H__
+
+#include <linux/io.h>
+
+/**
+ * DOC: overview
+ *
+ * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
+ * Depending on the location of the buffer, users may have to access it with
+ * I/O operations or memory load/store operations. For example, copying to
+ * system memory could be done with memcpy(), copying to I/O memory would be
+ * done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ *     void *vaddr = ...; // pointer to system memory
+ *     memcpy(vaddr, src, len);
+ *
+ *     void *vaddr_iomem = ...; // pointer to I/O memory
+ *     memcpy_toio(vaddr_iomem, src, len);
+ *
+ * When using dma-buf's vmap operation, the returned pointer is encoded as
+ * :c:type:`struct dma_buf_map <dma_buf_map>`.
+ * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
+ * system or I/O memory and a flag that signals the required method of
+ * accessing the buffer. Use the returned instance and the helper functions
+ * to access the buffer's memory in the correct way.
+ *
+ * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
+ * considered bad style. Rather then accessing its fields directly, use one
+ * of the provided helper functions, or implement your own. For example,
+ * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
+ * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
+ * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ *     struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ *     dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * Test if a mapping is valid with either dma_buf_map_is_set() or
+ * dma_buf_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ *     if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
+ *             // always true
+ *
+ * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
+ * for equality with dma_buf_map_is_equal(). Mappings that point to different
+ * memory spaces, system or I/O, are never equal. That's even true if both
+ * spaces are located in the same address space, both mappings contain the
+ * same address value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ *     struct dma_buf_map sys_map; // refers to system memory
+ *     struct dma_buf_map io_map; // refers to I/O memory
+ *
+ *     if (dma_buf_map_is_equal(&sys_map, &io_map))
+ *             // always false
+ *
+ * Instances of struct dma_buf_map do not have to be cleaned up, but
+ * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
+ * actually independent from the dma-buf infrastructure. When sharing buffers
+ * among devices, drivers have to know the location of the memory to access
+ * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
+ * solves this problem for dma-buf and its users. If other drivers or
+ * sub-systems require similar functionality, the type could be generalized
+ * and moved to a more prominent header file.
+ */
+
+/**
+ * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
+ * @vaddr_iomem:       The buffer's address if in I/O memory
+ * @vaddr:             The buffer's address if in system memory
+ * @is_iomem:          True if the dma-buf memory is located in I/O
+ *                     memory, or false otherwise.
+ */
+struct dma_buf_map {
+       union {
+               void __iomem *vaddr_iomem;
+               void *vaddr;
+       };
+       bool is_iomem;
+};
+
+/**
+ * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
+ * @vaddr:     A system-memory address
+ */
+#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
+       { \
+               .vaddr = (vaddr_), \
+               .is_iomem = false, \
+       }
+
+/**
+ * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
+ * @map:       The dma-buf mapping structure
+ * @vaddr:     A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
+{
+       map->vaddr = vaddr;
+       map->is_iomem = false;
+}
+
+/**
+ * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
+ * @lhs:       The dma-buf mapping structure
+ * @rhs:       A dma-buf mapping structure to compare with
+ *
+ * Two dma-buf mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
+                                       const struct dma_buf_map *rhs)
+{
+       if (lhs->is_iomem != rhs->is_iomem)
+               return false;
+       else if (lhs->is_iomem)
+               return lhs->vaddr_iomem == rhs->vaddr_iomem;
+       else
+               return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
+ * @map:       The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
+{
+       if (map->is_iomem)
+               return !map->vaddr_iomem;
+       return !map->vaddr;
+}
+
+/**
+ * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
+ * @map:       The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
+{
+       return !dma_buf_map_is_null(map);
+}
+
+/**
+ * dma_buf_map_clear - Clears a dma-buf mapping structure
+ * @map:       The dma-buf mapping structure
+ *
+ * Clears all fields to zero, including struct dma_buf_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * refer to system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void dma_buf_map_clear(struct dma_buf_map *map)
+{
+       if (map->is_iomem) {
+               map->vaddr_iomem = NULL;
+               map->is_iomem = false;
+       } else {
+               map->vaddr = NULL;
+       }
+}
+
+#endif /* __DMA_BUF_MAP_H__ */
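An example of the "implement your own" helper suggested above, dispatching
on the flag instead of open-coding field access at every call site:

	static void foo_memcpy_to_map(struct dma_buf_map *map,
				      const void *src, size_t len)
	{
		if (map->is_iomem)
			memcpy_toio(map->vaddr_iomem, src, len);
		else
			memcpy(map->vaddr, src, len);
	}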
index 957b398d30e5d88959c3374157689b5c6b412803..03875eaed51ad238b7203dacf2edef9bfc088e3a 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef __DMA_BUF_H__
 #define __DMA_BUF_H__
 
+#include <linux/dma-buf-map.h>
 #include <linux/file.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>
@@ -145,7 +146,8 @@ struct dma_buf_ops {
         *
         * A &sg_table scatter list of or the backing storage of the DMA buffer,
         * already mapped into the device address space of the &device attached
-        * with the provided &dma_buf_attachment.
+        * with the provided &dma_buf_attachment. The addresses and lengths in
+        * the scatter list are PAGE_SIZE aligned.
         *
         * On failure, returns a negative error value wrapped into a pointer.
         * May also return -EINTR when a signal was received while being
@@ -265,8 +267,8 @@ struct dma_buf_ops {
         */
        int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
 
-       void *(*vmap)(struct dma_buf *);
-       void (*vunmap)(struct dma_buf *, void *vaddr);
+       int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+       void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
 };
 
 /**
@@ -309,7 +311,7 @@ struct dma_buf {
        const struct dma_buf_ops *ops;
        struct mutex lock;
        unsigned vmapping_counter;
-       void *vmap_ptr;
+       struct dma_buf_map vmap_ptr;
        const char *exp_name;
        const char *name;
        spinlock_t name_lock;
@@ -502,6 +504,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 
 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                 unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
 #endif /* __DMA_BUF_H__ */
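Importers move from a returned pointer to an out-parameter plus error code;
a minimal sketch of the new calling convention:

	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;	/* no vmap available */

	/* access via map.vaddr or map.vaddr_iomem, as map.is_iomem says */

	dma_buf_vunmap(dmabuf, &map);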
index b5b312c19e463e6378f89866ef0bc5bd959523a0..4f50d736ea72bdb2f741f77cc88445f0e1083a81 100644 (file)
@@ -16,7 +16,7 @@
 struct font_desc {
     int idx;
     const char *name;
-    int width, height;
+    unsigned int width, height;
     const void *data;
     int pref;
 };
index fe815d7d9f58c11ba73e04513564a75e6ba8ed31..d661399b217dfc4b09eb3680a85ec6d5497bb196 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef __SHMOB_DRM_H__
 #define __SHMOB_DRM_H__
 
-#include <linux/kernel.h>
-
 #include <drm/drm_mode.h>
 
 enum shmob_drm_clk_source {
index 82f327801267759b15b9ca44d703a63833083056..9f7e19c9416c51a936fc3f45201c569f59a33aa9 100644 (file)
@@ -58,6 +58,30 @@ extern "C" {
  * may preserve meaning - such as number of planes - from the fourcc code,
  * whereas others may not.
  *
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ *   don't alias, otherwise two drivers might support the same format but use
+ *   different aliases, preventing them from sharing buffers in an efficient
+ *   format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ *   see modifiers as opaque tokens they can check for equality and intersect.
+ *   These users mustn't need to reason about the modifier value
+ *   (i.e. they are not expected to extract information out of the modifier).
+ *
  * Vendors should document their modifier usage in as much detail as
  * possible, to ensure maximum compatibility across devices, drivers and
  * applications.
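For the second kind of user, the opaque-token model reduces to equality
tests; a sketch of intersecting two advertised modifier lists:

	/* Keep only the modifiers both sides advertise; no decoding needed. */
	static size_t intersect_modifiers(const uint64_t *a, size_t na,
					  const uint64_t *b, size_t nb,
					  uint64_t *out)
	{
		size_t i, j, n = 0;

		for (i = 0; i < na; i++)
			for (j = 0; j < nb; j++)
				if (a[i] == b[j])
					out[n++] = a[i];
		return n;
	}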
@@ -155,6 +179,12 @@ extern "C" {
 #define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
 #define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
 
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
 /* packed YCbCr */
 #define DRM_FORMAT_YUYV                fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
 #define DRM_FORMAT_YVYU                fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -320,7 +350,6 @@ extern "C" {
  */
 
 /* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE           0
 #define DRM_FORMAT_MOD_VENDOR_NONE    0
 #define DRM_FORMAT_MOD_VENDOR_INTEL   0x01
 #define DRM_FORMAT_MOD_VENDOR_AMD     0x02
@@ -392,6 +421,16 @@ extern "C" {
  */
 #define DRM_FORMAT_MOD_LINEAR  fourcc_mod_code(NONE, 0)
 
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit, instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE    0
+
 /* Intel framebuffer modifiers */
 
 /*
index f06a789f34cd9993f3952da30723adf995790605..b9ec26e9c64636bef5ebeb7f8890cfceb7a92b70 100644 (file)
@@ -46,6 +46,7 @@ extern "C" {
 #define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
 #define DRM_VIRTGPU_WAIT     0x08
 #define DRM_VIRTGPU_GET_CAPS  0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
 
 #define VIRTGPU_EXECBUF_FENCE_FD_IN    0x01
 #define VIRTGPU_EXECBUF_FENCE_FD_OUT   0x02
@@ -71,6 +72,9 @@ struct drm_virtgpu_execbuffer {
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
 #define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing  */
 
 struct drm_virtgpu_getparam {
        __u64 param;
@@ -100,7 +104,7 @@ struct drm_virtgpu_resource_info {
        __u32 bo_handle;
        __u32 res_handle;
        __u32 size;
-       __u32 stride;
+       __u32 blob_mem;
 };
 
 struct drm_virtgpu_3d_box {
@@ -117,6 +121,8 @@ struct drm_virtgpu_3d_transfer_to_host {
        struct drm_virtgpu_3d_box box;
        __u32 level;
        __u32 offset;
+       __u32 stride;
+       __u32 layer_stride;
 };
 
 struct drm_virtgpu_3d_transfer_from_host {
@@ -124,6 +130,8 @@ struct drm_virtgpu_3d_transfer_from_host {
        struct drm_virtgpu_3d_box box;
        __u32 level;
        __u32 offset;
+       __u32 stride;
+       __u32 layer_stride;
 };
 
 #define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -140,6 +148,31 @@ struct drm_virtgpu_get_caps {
        __u32 pad;
 };
 
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST             0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+       /* zero is invalid blob_mem */
+       __u32 blob_mem;
+       __u32 blob_flags;
+       __u32 bo_handle;
+       __u32 res_handle;
+       __u64 size;
+
+       /*
+        * Used by 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+        * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+        */
+       __u32 pad;
+       __u32 cmd_size;
+       __u64 cmd;
+       __u64 blob_id;
+};
+
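A sketch of calling the new ioctl (wrapper macro defined just below) to create
a mappable guest blob; the designated initializer leaves cmd, cmd_size and
blob_id at zero, as VIRTGPU_BLOB_MEM_GUEST requires:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/virtgpu_drm.h>

	/* Sketch: create a mappable guest blob. On success the kernel
	 * fills in bo_handle and res_handle. */
	static int create_guest_blob(int fd, uint64_t size, uint32_t *bo)
	{
		struct drm_virtgpu_resource_create_blob blob = {
			.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
			.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
			.size       = size,
		};
		int ret = ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB,
				&blob);

		if (!ret)
			*bo = blob.bo_handle;
		return ret;
	}
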
 #define DRM_IOCTL_VIRTGPU_MAP \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
@@ -175,6 +208,10 @@ struct drm_virtgpu_get_caps {
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
        struct drm_virtgpu_get_caps)
 
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB                         \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,   \
+               struct drm_virtgpu_resource_create_blob)
+
 #if defined(__cplusplus)
 }
 #endif
index 747a5c5cc4e60665047891d940f43be8d7292be0..0ec6b610402cb65e7ccc36338f86ccdd353b3b6d 100644 (file)
  */
 #define VIRTIO_GPU_F_RESOURCE_UUID       2
 
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB       3
+
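On the driver side, the new commands are gated on this negotiated feature bit;
a sketch assuming the kernel's virtio_has_feature() helper:

	#include <linux/virtio_config.h>

	/* Sketch: only issue blob commands when the device offers them. */
	static bool vgdev_supports_blob(struct virtio_device *vdev)
	{
		return virtio_has_feature(vdev, VIRTIO_GPU_F_RESOURCE_BLOB);
	}
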
 enum virtio_gpu_ctrl_type {
        VIRTIO_GPU_UNDEFINED = 0,
 
@@ -71,6 +76,8 @@ enum virtio_gpu_ctrl_type {
        VIRTIO_GPU_CMD_GET_CAPSET,
        VIRTIO_GPU_CMD_GET_EDID,
        VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+       VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+       VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
 
        /* 3d commands */
        VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -81,6 +88,8 @@ enum virtio_gpu_ctrl_type {
        VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
        VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
        VIRTIO_GPU_CMD_SUBMIT_3D,
+       VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+       VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
 
        /* cursor commands */
        VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -93,6 +102,7 @@ enum virtio_gpu_ctrl_type {
        VIRTIO_GPU_RESP_OK_CAPSET,
        VIRTIO_GPU_RESP_OK_EDID,
        VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+       VIRTIO_GPU_RESP_OK_MAP_INFO,
 
        /* error responses */
        VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -103,6 +113,11 @@ enum virtio_gpu_ctrl_type {
        VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
 };
 
+enum virtio_gpu_shm_id {
+       VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+       VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
 #define VIRTIO_GPU_FLAG_FENCE (1 << 0)
 
 struct virtio_gpu_ctrl_hdr {
@@ -359,4 +374,67 @@ struct virtio_gpu_resp_resource_uuid {
        __u8 uuid[16];
 };
 
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+       struct virtio_gpu_ctrl_hdr hdr;
+       __le32 resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST             0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D            0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST      0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE     0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE    0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+       /* zero is invalid blob_mem */
+       __le32 blob_mem;
+       __le32 blob_flags;
+       __le32 nr_entries;
+       __le64 blob_id;
+       __le64 size;
+       /*
+        * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+        */
+};
+
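Since the fixed header is followed by the scatter-gather list, sizing the
command buffer is simple arithmetic; a sketch:

	#include <stddef.h>
	#include <stdint.h>
	#include <linux/virtio_gpu.h>

	/* Sketch: total command length, header plus nr_entries entries. */
	static size_t blob_cmd_len(uint32_t nr_entries)
	{
		return sizeof(struct virtio_gpu_resource_create_blob) +
		       (size_t)nr_entries *
		       sizeof(struct virtio_gpu_mem_entry);
	}
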
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+       struct virtio_gpu_ctrl_hdr hdr;
+       struct virtio_gpu_rect r;
+       __le32 scanout_id;
+       __le32 resource_id;
+       __le32 width;
+       __le32 height;
+       __le32 format;
+       __le32 padding;
+       __le32 strides[4];
+       __le32 offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+       struct virtio_gpu_ctrl_hdr hdr;
+       __le32 resource_id;
+       __le32 padding;
+       __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK     0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE     0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED   0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC       0x03
+struct virtio_gpu_resp_map_info {
+       struct virtio_gpu_ctrl_hdr hdr;
+       __u32 map_info;
+       __u32 padding;
+};
+
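A sketch of how a guest might decode the response, masking the caching
attribute out of map_info:

	#include <linux/virtio_gpu.h>

	/* Sketch: check the host-advertised cache mode for a mapping. */
	static int map_is_wc(const struct virtio_gpu_resp_map_info *resp)
	{
		return (resp->map_info & VIRTIO_GPU_MAP_CACHE_MASK) ==
		       VIRTIO_GPU_MAP_CACHE_WC;
	}
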
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+       struct virtio_gpu_ctrl_hdr hdr;
+       __le32 resource_id;
+       __le32 padding;
+};
+
 #endif