/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

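/**
 * vmw_dma_buffer - Convert a struct ttm_buffer_object to a struct
 * vmw_dma_buffer.
 *
 * @bo: Pointer to the TTM buffer object.
 *
 * Returns a pointer to the struct vmw_dma_buffer embedding @bo.
 */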
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

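/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: The resource to reference.
 *
 * Returns @res with its refcount incremented.
 */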
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

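/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless its refcount has already dropped to zero.
 *
 * @res: The resource to reference.
 *
 * Returns @res on success, or NULL if the resource is already on its way
 * to destruction.
 */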
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

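/**
 * vmw_resource_release - Final release callback called from kref_put().
 *
 * @kref: The struct kref embedded in the resource.
 *
 * Unbinds and unreferences a backup buffer if present, calls the hardware
 * destroy callback, frees the resource using its destructor and finally
 * removes the resource id from the idr.
 */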
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

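/**
 * vmw_resource_unreference - Drop a reference on a resource, clearing the
 * caller's pointer.
 *
 * @p_res: Pointer to the resource pointer. Set to NULL on return.
 */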
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

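/**
 * vmw_resource_lookup - Look up and reference a resource by device id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr: The idr in which to look up.
 * @id: The resource id.
 *
 * Returns a referenced pointer to the resource, or NULL if it wasn't
 * found or hasn't been activated yet.
 */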
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/*
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

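/**
 * vmw_dmabuf_bo_free - TTM destroy callback for ordinary vmw dma buffers.
 *
 * @bo: The TTM buffer object embedded in a struct vmw_dma_buffer.
 */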
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

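/**
 * vmw_user_dmabuf_destroy - TTM destroy callback for user dma buffers.
 *
 * @bo: The TTM buffer object embedded in a struct vmw_user_dma_buffer.
 */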
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

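/**
 * vmw_dmabuf_init - Initialize an embedded struct vmw_dma_buffer.
 *
 * @dev_priv: Pointer to the device private struct.
 * @vmw_bo: The buffer to initialize.
 * @size: The requested buffer size in bytes.
 * @placement: The initial TTM placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The destroy callback, which also identifies whether this is
 * a user dma buffer for accounting purposes.
 */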
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

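/**
 * vmw_user_dmabuf_release - TTM base object release callback for user
 * dma buffers.
 *
 * @p_base: Pointer to the base object pointer. Set to NULL, and the
 * buffer object reference held by the base object is dropped.
 */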
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

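/**
 * vmw_user_dmabuf_ref_obj_release - TTM ref object release callback for
 * user dma buffers.
 *
 * @base: The base object of the buffer.
 * @ref_type: The reference type being released; only TTM_REF_SYNCCPU_WRITE
 * is expected here.
 */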
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: If non-NULL, a pointer to the buffer's referenced base object
 * is assigned here.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

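/**
 * vmw_dmabuf_alloc_ioctl - ioctl function implementing dma buffer
 * allocation.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the union drm_vmw_alloc_dmabuf_arg ioctl argument.
 * @file_priv: Identifies the caller.
 */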
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

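/**
 * vmw_dmabuf_unref_ioctl - ioctl function implementing dma buffer handle
 * release.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the struct drm_vmw_unref_dmabuf_arg ioctl argument.
 * @file_priv: Identifies the caller.
 */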
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

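/**
 * vmw_user_dmabuf_lookup - Look up a dma buffer from a user-space handle.
 *
 * @tfile: Identifies the caller.
 * @handle: The user-space handle.
 * @out: On success, a referenced pointer to the struct vmw_dma_buffer is
 * assigned here.
 * @p_base: If non-NULL, a referenced pointer to the underlying base object
 * is assigned here.
 */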
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

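/**
 * vmw_user_dmabuf_reference - Add a TTM_REF_USAGE reference on the base
 * object of a user dma buffer, returning its user-space handle.
 *
 * @tfile: Identifies the caller.
 * @dma_buf: The user dma buffer to reference.
 * @handle: The user-space handle is assigned here.
 */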
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

/*
 * Stream management
 */

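/**
 * vmw_stream_destroy - hw_destroy callback for stream resources.
 *
 * @res: The stream resource, whose overlay stream is released back to
 * the device.
 */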
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

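/**
 * vmw_stream_init - Initialize a stream resource and claim a device
 * overlay stream for it.
 *
 * @dev_priv: Pointer to the device private struct.
 * @stream: The stream to initialize.
 * @res_free: The resource destructor, or NULL to use kfree().
 */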
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

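/**
 * vmw_user_stream_free - Resource destructor for user streams.
 *
 * @res: The stream resource.
 *
 * Frees the user stream and returns its accounted size to the global
 * graphics memory accounting.
 */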
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * vmw_user_stream_base_release - Called when user space has no more
 * references on the base object. Releases the base object's reference
 * on the resource object.
 *
 * @p_base: Pointer to the base object pointer. Set to NULL on return.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

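/**
 * vmw_stream_unref_ioctl - ioctl function implementing stream handle
 * release.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the struct drm_vmw_stream_arg ioctl argument.
 * @file_priv: Identifies the caller, which must own the stream.
 */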
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

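/**
 * vmw_stream_claim_ioctl - ioctl function implementing stream creation.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the struct drm_vmw_stream_arg ioctl argument.
 * @file_priv: Identifies the caller.
 */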
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");

		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

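/**
 * vmw_user_stream_lookup - Look up a stream resource from a stream id,
 * verifying that the caller owns it.
 *
 * @dev_priv: Pointer to the device private struct.
 * @tfile: Identifies the caller.
 * @inout_id: The user-space stream id; replaced with the device stream id
 * on successful return.
 * @out: On success, a referenced pointer to the stream resource is
 * assigned here.
 */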
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @interruptible: Whether any sleeps should be performed interruptible.
 * @no_backup: Whether to skip backup buffer allocation.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
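
/*
 * Illustrative usage sketch only, not a verbatim call site from this
 * driver: callers typically bracket resource use roughly as
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret == 0) {
 *		ret = vmw_resource_validate(res);
 *		... submit commands referencing res->id ...
 *		vmw_resource_unreserve(res, false, NULL, 0);
 *	}
 *
 * with backup buffers reserved and fenced by the execbuf code in between.
 */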

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states on query MOB moves
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_fence_single_bo(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed interruptible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}