/*
 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"

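/*
 * Both range managers work in 16-byte (1 << VIA_MM_ALIGN_SHIFT) units:
 * request sizes are rounded up to this granularity and drm_mm node offsets
 * are converted back to byte offsets when reported to user space.
 */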
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)

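/*
 * Bookkeeping for one allocation: the drm_mm node reserving the range and
 * a list entry linking the block to the owning file's obj_list so it can
 * be reclaimed when that file is closed.
 */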
struct via_memblock {
        struct drm_mm_node mm_node;
        struct list_head owner_list;
};

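/*
 * Ioctl handler: initialize the drm_mm allocator that manages the AGP
 * aperture.  Offset and size arrive from user space in bytes; the manager
 * itself tracks 16-byte units.
 */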
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_via_agp_t *agp = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);

        dev_priv->agp_initialized = 1;
        dev_priv->agp_offset = agp->offset;
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
        return 0;
}

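/*
 * Ioctl handler: initialize the drm_mm allocator that manages on-card
 * (framebuffer) memory, mirroring via_agp_init() above.
 */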
int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_via_fb_t *fb = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);

        dev_priv->vram_initialized = 1;
        dev_priv->vram_offset = fb->offset;

        mutex_unlock(&dev->struct_mutex);
        DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);

        return 0;
}

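/*
 * Context teardown: release the context's futexes and, if this was the
 * last remaining context, uninstall the IRQ handler, clean up futex state
 * and drop the register/framebuffer mappings.
 */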
int via_final_context(struct drm_device *dev, int context)
{
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

        via_release_futex(dev_priv, context);

        /* Linux specific until context tracking code gets ported to BSD */
        /* Last context, perform cleanup */
        if (list_is_singular(&dev->ctxlist)) {
                DRM_DEBUG("Last Context\n");
                drm_irq_uninstall(dev);
                via_cleanup_futex(dev_priv);
                via_do_cleanup_map(dev);
        }
        return 1;
}

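/*
 * Last close of the device node: tear down whichever memory managers were
 * initialized so the next client starts from a clean state.
 */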
void via_lastclose(struct drm_device *dev)
{
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

        if (!dev_priv)
                return;

        mutex_lock(&dev->struct_mutex);
        if (dev_priv->vram_initialized) {
                drm_mm_takedown(&dev_priv->vram_mm);
                dev_priv->vram_initialized = 0;
        }
        if (dev_priv->agp_initialized) {
                drm_mm_takedown(&dev_priv->agp_mm);
                dev_priv->agp_initialized = 0;
        }
        mutex_unlock(&dev->struct_mutex);
}

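/*
 * Ioctl handler: allocate a block of video or AGP memory.  Reserves a
 * drm_mm node, publishes it in the object idr (the returned index is the
 * user-visible handle) and links it to the file's obj_list so it can be
 * reclaimed on close.
 */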
int via_mem_alloc(struct drm_device *dev, void *data,
                  struct drm_file *file)
{
        drm_via_mem_t *mem = data;
        int retval = 0, user_key;
        struct via_memblock *item;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        struct via_file_private *file_priv = file->driver_priv;
        unsigned long tmpSize;

        if (mem->type > VIA_MEM_AGP) {
                DRM_ERROR("Unknown memory type allocation\n");
                return -EINVAL;
        }
        mutex_lock(&dev->struct_mutex);
        if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
                      dev_priv->agp_initialized)) {
                DRM_ERROR
                    ("Attempt to allocate from uninitialized memory manager.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        item = kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                retval = -ENOMEM;
                goto fail_alloc;
        }

        tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
        if (mem->type == VIA_MEM_AGP)
                retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                            &item->mm_node,
                                            tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        else
                retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                            &item->mm_node,
                                            tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        if (retval)
                goto fail_alloc;

        retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
        if (retval < 0)
                goto fail_idr;
        user_key = retval;

        list_add(&item->owner_list, &file_priv->obj_list);
        mutex_unlock(&dev->struct_mutex);

        mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
                      dev_priv->vram_offset : dev_priv->agp_offset) +
            ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
        mem->index = user_key;

        return 0;

fail_idr:
        drm_mm_remove_node(&item->mm_node);
fail_alloc:
        kfree(item);
        mutex_unlock(&dev->struct_mutex);

        mem->offset = 0;
        mem->size = 0;
        mem->index = 0;
        DRM_DEBUG("Video memory allocation failed\n");

        return retval;
}

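/*
 * Ioctl handler: free a block previously returned by via_mem_alloc().
 * Looks the handle up in the idr, unlinks the block from its owner's list
 * and releases the drm_mm node.
 */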
int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_via_private_t *dev_priv = dev->dev_private;
        drm_via_mem_t *mem = data;
        struct via_memblock *obj;

        mutex_lock(&dev->struct_mutex);
        obj = idr_find(&dev_priv->object_idr, mem->index);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        idr_remove(&dev_priv->object_idr, mem->index);
        list_del(&obj->owner_list);
        drm_mm_remove_node(&obj->mm_node);
        kfree(obj);
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("free = 0x%lx\n", mem->index);

        return 0;
}

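/*
 * Called when a DRM file is closed: take the idle lock, quiesce the DMA
 * engine and free every allocation still linked to that file's obj_list.
 */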
void via_reclaim_buffers_locked(struct drm_device *dev,
                                struct drm_file *file)
{
        struct via_file_private *file_priv = file->driver_priv;
        struct via_memblock *entry, *next;

        if (!(file->minor->master && file->master->lock.hw_lock))
                return;

        drm_legacy_idlelock_take(&file->master->lock);

        mutex_lock(&dev->struct_mutex);
        if (list_empty(&file_priv->obj_list)) {
                mutex_unlock(&dev->struct_mutex);
                drm_legacy_idlelock_release(&file->master->lock);

                return;
        }

        via_driver_dma_quiescent(dev);

        list_for_each_entry_safe(entry, next, &file_priv->obj_list,
                                 owner_list) {
                list_del(&entry->owner_list);
                drm_mm_remove_node(&entry->mm_node);
                kfree(entry);
        }
        mutex_unlock(&dev->struct_mutex);

        drm_legacy_idlelock_release(&file->master->lock);

        return;
}