// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

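/*
 * struct vmwgfx_gmrid_man - per-memory-type GMR id manager state
 *
 * @lock: Protects the page accounting below.
 * @gmr_ida: ida used to hand out GMR / MOB ids.
 * @max_gmr_ids: Maximum number of ids that may be allocated.
 * @max_gmr_pages: Maximum total number of pages the allocated ids may back.
 * @used_gmr_pages: Number of pages currently backed by allocated ids.
 */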
struct vmwgfx_gmrid_man {
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
};

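/*
 * Allocate a GMR id and reserve space for the buffer object's pages.
 *
 * Note the TTM convention followed here: returning 0 while leaving
 * mem->mm_node NULL tells the caller that no space is available (so it
 * may fall back to eviction), whereas a negative value is a hard error.
 * Hence only -ENOMEM from ida_alloc_max() is propagated; id or page
 * exhaustion is reported as "no space".
 */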
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int id;

	mem->mm_node = NULL;

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0)
		return (id != -ENOMEM ? 0 : id);

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto nospace;
	}

	mem->mm_node = gman;
	mem->start = id;
	mem->num_pages = bo->num_pages;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	return 0;
}

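/*
 * Release the GMR id and return the reserved pages to the pool. Safe to
 * call on a ttm_mem_reg that never had a node assigned.
 */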
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
				   struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (mem->mm_node) {
		ida_free(&gman->gmr_ida, mem->start);
		spin_lock(&gman->lock);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}

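/*
 * Initialize the manager. Note that @p_size does not carry a size here;
 * the driver passes the placement type (VMW_PL_GMR or VMW_PL_MOB)
 * instead, which selects the id and page limits to enforce.
 */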
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
			      unsigned long p_size)
{
	struct vmw_private *dev_priv =
		container_of(man->bdev, struct vmw_private, bdev);
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);

	switch (p_size) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	man->priv = (void *) gman;
	return 0;
}

static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (gman) {
		ida_destroy(&gman->gmr_ida);
		kfree(gman);
	}
	return 0;
}

static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *printer)
{
	drm_printf(printer, "No debug info available for the GMR id manager\n");
}

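/*
 * TTM memory type manager operations used for the GMR and MOB id
 * placements.
 */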
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	.init = vmw_gmrid_man_init,
	.takedown = vmw_gmrid_man_takedown,
	.get_node = vmw_gmrid_man_get_node,
	.put_node = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};