/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_lock.c
 * Implementation of the ioctls and other support code for dealing with the
 * hardware lock.
 *
 * The DRM hardware lock is a shared structure between the kernel and userland.
 *
 * On uncontended access where the new context was the last context, the
 * client may take the lock without dropping down into the kernel, using atomic
 * compare-and-set.
 *
 * If the client finds during compare-and-set that it was not the last owner
 * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
 * lock, and may have side-effects of kernel-managed context switching.
 *
 * When the client releases the lock, if the lock is marked as being contended
 * by another client, then the DRM unlock ioctl is called so that the
 * contending client may be woken up.
 */

#include "dev/drm/drmP.h"

/*
 * DRM_IOCTL_LOCK handler.
 *
 * Blocks until the hardware lock has been acquired on behalf of
 * lock->context, or until the sleep is interrupted by a signal.
 *
 * dev       - device whose hardware lock is requested.
 * data      - struct drm_lock passed in from userland (context, flags).
 * file_priv - file handle recorded as the lock owner on success.
 *
 * Returns 0 once the lock is held, or a positive errno (BSD ioctl
 * convention): EINVAL for an invalid context, otherwise the tsleep()
 * error (e.g. ERESTART) when the wait was interrupted.
 */
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_lock *lock = data;
	int ret = 0;

	/* Userland may never claim the kernel's reserved context. */
	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
		    DRM_CURRENTPID, lock->context);
		return EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
	    lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
	    lock->flags);

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
	    lock->context < 0)
		return EINVAL;

	DRM_LOCK();
	for (;;) {
		if (drm_lock_take(&dev->lock, lock->context)) {
			/* Record the new owner and bump lock statistics. */
			dev->lock.file_priv = file_priv;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
			break;	/* Got lock */
		}

		/*
		 * Contention: register on the sleep queue *before* dropping
		 * DRM_LOCK, so a wakeup issued between DRM_UNLOCK() and
		 * tsleep() is not lost.  PCATCH lets signals interrupt the
		 * wait; a zero timeout means sleep indefinitely.
		 */
		crit_enter();
		tsleep_interlock((void *)&dev->lock.lock_queue);
		DRM_UNLOCK();
		ret = tsleep((void *)&dev->lock.lock_queue, PCATCH,
		    "drmlk2", 0);
		crit_exit();
		DRM_LOCK();
		if (ret != 0)
			break;
	}
	DRM_UNLOCK();

	if (ret == ERESTART)
		DRM_DEBUG("restarting syscall\n");
	else
		DRM_DEBUG("%d %s\n", lock->context,
		    ret ? "interrupted" : "has lock");

	if (ret != 0)
		return ret;

	/* XXX: Add signal blocking here */

	/* Optionally drain pending DMA before returning with the lock. */
	if (dev->driver->dma_quiescent != NULL &&
	    (lock->flags & _DRM_LOCK_QUIESCENT))
		dev->driver->dma_quiescent(dev);

	return 0;
}

/*
 * DRM_IOCTL_UNLOCK handler.
 *
 * Releases the hardware lock held by the caller's context: ownership is
 * first handed over to the kernel context, then the lock word is cleared
 * and any contexts sleeping in drm_lock() are woken.
 *
 * Returns 0 on success, or EINVAL if the kernel context is named.
 */
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_lock *lock = data;

	DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
	    lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
	    lock->flags);

	/* The kernel context is managed internally, never via this ioctl. */
	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
		    DRM_CURRENTPID, lock->context);
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	DRM_LOCK();
	/* Hand the lock to the kernel, then drop it and wake waiters. */
	drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);

	if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
		/* Freed a lock some other context still held; log location. */
		DRM_ERROR("\n");
	}
	DRM_UNLOCK();

	return 0;
}

/*
 * Try to take the hardware lock for `context` without sleeping.
 *
 * Atomically compare-and-sets the shared lock word: if the lock is free,
 * the HELD bit and the new owner are installed; if it is already held,
 * only the CONT (contended) bit is set so the holder knows to wake
 * waiters on release.
 *
 * Returns 1 if the lock was acquired for `context`, 0 otherwise (also 0
 * on a recursive attempt by the current holder, which is logged unless
 * it is the kernel context).
 */
int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
{
	volatile unsigned int *lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	/* Retry until the compare-and-set lands on an unchanged word. */
	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else
			new = context | _DRM_LOCK_HELD;
	} while (!atomic_cmpset_int(lock, old, new));

	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			/* Recursive lock attempt by the current holder. */
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
				    context);
			}
			return 0;
		}
	}
	if (new == (context | _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

/* This takes a lock forcibly and hands it to context. Should ONLY be used
   inside *_unlock to give lock to kernel before calling *_dma_schedule.
 */
int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
{
	volatile unsigned int *lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	/* Drop the recorded file owner; the kernel context has no file. */
	lock_data->file_priv = NULL;
	do {
		old = *lock;
		/* Unconditionally install `context` as holder, HELD set. */
		new = context | _DRM_LOCK_HELD;
	} while (!atomic_cmpset_int(lock, old, new));

	/* The forced hand-off always succeeds. */
	return 1;
}

/*
 * Clear the hardware lock word and wake any contexts sleeping on it.
 *
 * Returns 0 on a normal release.  Returns 1 (after logging) if the lock
 * was found held by a context other than `context`, i.e. an attempt to
 * free somebody else's lock; the lock word is cleared regardless.
 */
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
	volatile unsigned int *lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	/* The lock is going away, so forget the recorded owner first. */
	lock_data->file_priv = NULL;
	do {
		old = *lock;
		new = 0;	/* clears owner, HELD and CONT bits */
	} while (!atomic_cmpset_int(lock, old, new));

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
		    context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	/* Wake anyone blocked in drm_lock()'s tsleep(). */
	DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
	return 0;
}