/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *          Jeff Hartmann <jhartmann@precisioninsight.com>
 *
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "i810_drv.h"

#include <linux/interrupt.h>    /* For task queue support */

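/* MMIO register access helpers.  I810_REG() is a placeholder that maps
 * every register onto maplist entry 2, presumably where the register
 * aperture was mapped at this stage of development; real register
 * offsets were filled in later. */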
#define I810_REG(reg)           2
#define I810_BASE(reg)          ((unsigned long) \
                                dev->maplist[I810_REG(reg)]->handle)
#define I810_ADDR(reg)          (I810_BASE(reg) + reg)
#define I810_DEREF(reg)         *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg)          I810_DEREF(reg)
#define I810_WRITE(reg,val)     do { I810_DEREF(reg) = val; } while (0)

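/* The functions below are skeleton stubs: init/cleanup only log,
 * dispatch never touches the hardware, and the quiescent/ready checks
 * unconditionally report the engine as idle. */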
void i810_dma_init(drm_device_t *dev)
{
        printk(KERN_INFO "i810_dma_init\n");
}

void i810_dma_cleanup(drm_device_t *dev)
{
        printk(KERN_INFO "i810_dma_cleanup\n");
}

static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address,
                                     unsigned long length)
{
        printk(KERN_INFO "i810_dma_dispatch\n");
}

static inline void i810_dma_quiescent(drm_device_t *dev)
{
}

static inline void i810_dma_ready(drm_device_t *dev)
{
        i810_dma_quiescent(dev);
        printk(KERN_INFO "i810_dma_ready\n");
}

static inline int i810_dma_is_ready(drm_device_t *dev)
{
        i810_dma_quiescent(dev);
        printk(KERN_INFO "i810_dma_is_ready\n");
        return 1;
}

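/* Interrupt handler: count the IRQ, free the buffer that just completed
 * (dma->this_buffer), and kick the scheduler from the immediate task
 * queue so the next buffer is dispatched outside interrupt context. */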
static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t     *dev = (drm_device_t *)device;
        drm_device_dma_t *dma = dev->dma;

        atomic_inc(&dev->total_irq);
        if (i810_dma_is_ready(dev)) {
                                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag)) {
                        atomic_inc(&dma->total_missed_free);
                        return;
                }
                if (dma->this_buffer) {
                        drm_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                                /* Dispatch new buffer */
                queue_task(&dev->tq, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
}

/* Only called by i810_dma_schedule. */
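/* Takes dma->next_buffer under dev->dma_flag, acquires the
 * kernel-context hardware lock unless the buffer was queued "while
 * locked" or the caller already holds it, performs any pending context
 * switch, and dispatches the buffer.  Returns 0 on success, -EBUSY if
 * the engine, the lock, or a context switch gets in the way, and
 * -EINVAL for missing or reclaimed buffers. */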
static int i810_do_dma(drm_device_t *dev, int locked)
{
        unsigned long    address;
        unsigned long    length;
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         dma_start, dma_stop;
#endif

        if (test_and_set_bit(0, &dev->dma_flag)) {
                atomic_inc(&dma->total_missed_dma);
                return -EBUSY;
        }

#if DRM_DMA_HISTOGRAM
        dma_start = get_cycles();
#endif

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf     = dma->next_buffer;
        address = (unsigned long)buf->bus_address;
        length  = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!i810_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, buf->pid);
                }
        } else {
                if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
                                              DRM_KERNEL_CONTEXT)) {
                        atomic_inc(&dma->total_missed_lock);
                        clear_bit(0, &dev->dma_flag);
                        return -EBUSY;
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                                /* PRE: dev->last_context != buf->context */
                if (drm_context_switch(dev, dev->last_context, buf->context)) {
                        drm_clear_next_buffer(dev);
                        drm_free_buffer(dev, buf);
                }
                retcode = -EBUSY;
                goto cleanup;

                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
        }

        drm_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
        buf->time_dispatched = get_cycles();
#endif

        i810_dma_dispatch(dev, address, length);
        drm_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_add(length, &dma->total_bytes);
        atomic_inc(&dma->total_dmas);

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:
        clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
        dma_stop = get_cycles();
        atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
#endif

        return retcode;
}

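/* Thin wrappers so the scheduler can be driven from a kernel timer or
 * from the immediate task queue, both of which pass an opaque pointer. */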
static void i810_dma_schedule_timer_wrapper(unsigned long dev)
{
        i810_dma_schedule((drm_device_t *)dev, 0);
}

static void i810_dma_schedule_tq_wrapper(void *dev)
{
        i810_dma_schedule(dev, 0);
}

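/* Main dispatch loop: retry any previously selected but unsent buffer,
 * otherwise ask drm_select_queue() for the next queue with work and
 * send its head buffer.  The loop re-runs (up to "expire" times) while
 * the engine stays ready and new work keeps arriving.  Not reentrant;
 * guarded by dev->interrupt_flag. */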
int i810_dma_schedule(drm_device_t *dev, int locked)
{
        int              next;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         schedule_start;
#endif

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                                /* Not reentrant */
                atomic_inc(&dma->total_missed_sched);
                return -EBUSY;
        }
        missed = atomic_read(&dma->total_missed_sched);

#if DRM_DMA_HISTOGRAM
        schedule_start = get_cycles();
#endif

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return -EBUSY;
        }
        if (dma->next_buffer) {
                                /* Unsent buffer that was previously
                                   selected, but that couldn't be sent
                                   because the lock could not be obtained
                                   or the DMA engine wasn't ready.  Try
                                   again. */
                atomic_inc(&dma->total_tried);
                if (!(retcode = i810_do_dma(dev, locked))) {
                        atomic_inc(&dma->total_hit);
                        ++processed;
                }
        } else {
                do {
                        next = drm_select_queue(dev,
                                             i810_dma_schedule_timer_wrapper);
                        if (next >= 0) {
                                q   = dev->queuelist[next];
                                buf = drm_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue  = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        drm_clear_next_buffer(dev);
                                        drm_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = i810_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dma->total_missed_sched)) {
                        atomic_inc(&dma->total_lost);
                        if (i810_dma_is_ready(dev)) goto again;
                }
                if (processed && i810_dma_is_ready(dev)) {
                        atomic_inc(&dma->total_lost);
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
                                                           - schedule_start)]);
#endif
        return retcode;
}

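/* Synchronous, high-priority send path: bypasses the queues and
 * dispatches each buffer in d->send_indices[] directly, sleeping
 * through any required context switch.  Note that the hardware lock is
 * released after every dispatch inside the loop, even in the
 * _DRM_DMA_WHILE_LOCKED case -- presumably tolerable in this skeleton,
 * where dispatch is a no-op. */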
static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
        unsigned long     address;
        unsigned long     length;
        int               must_free = 0;
        int               retcode   = 0;
        int               i;
        int               idx;
        drm_buf_t         *buf;
        drm_buf_t         *last_buf = NULL;
        drm_device_dma_t  *dma      = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

                                /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current)) return -EINTR;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!drm_lock_take(&dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT)) {
                        schedule();
                        if (signal_pending(current)) {
                                clear_bit(0, &dev->interrupt_flag);
                                return -EINTR;
                        }
                }
                ++must_free;
        }
        atomic_inc(&dma->total_prio);

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  current->pid, buf->pid);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using %d's buffer on list %d\n",
                                  current->pid, buf->pid, buf->list);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                                /* This isn't a race condition on
                                   buf->list, since our concern is the
                                   buffer reclaim during the time the
                                   process closes the /dev/drm? handle, so
                                   it can't also be doing DMA. */
                buf->list         = DRM_LIST_PRIO;
                buf->used         = d->send_sizes[i];
                buf->context      = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address           = (unsigned long)buf->address;
                length            = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        add_wait_queue(&dev->context_wait, &entry);
                        current->state = TASK_INTERRUPTIBLE;
                                /* PRE: dev->last_context != buf->context */
                        drm_context_switch(dev, dev->last_context,
                                           buf->context);
                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
                        schedule();
                        current->state = TASK_RUNNING;
                        remove_wait_queue(&dev->context_wait, &entry);
                        if (signal_pending(current)) {
                                retcode = -EINTR;
                                goto cleanup;
                        }
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

#if DRM_DMA_HISTOGRAM
                buf->time_queued     = get_cycles();
                buf->time_dispatched = buf->time_queued;
#endif
                i810_dma_dispatch(dev, address, length);
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }

                atomic_add(length, &dma->total_bytes);
                atomic_inc(&dma->total_dmas);

                if (last_buf) {
                        drm_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }

cleanup:
        if (last_buf) {
                i810_dma_ready(dev);
                drm_free_buffer(dev, last_buf);
        }

        if (must_free && !dev->context_flag) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return retcode;
}

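/* Queued send path: enqueue the request on the per-context waitlist,
 * kick the scheduler, and (if _DRM_DMA_BLOCK is set) sleep until the
 * last buffer of the batch has been dispatched and completed. */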
static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_buf_t         *last_buf = NULL;
        int               retcode   = 0;
        drm_device_dma_t  *dma      = dev->dma;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[d->send_indices[d->send_count-1]];
                add_wait_queue(&last_buf->dma_wait, &entry);
        }

        if ((retcode = drm_dma_enqueue(dev, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        remove_wait_queue(&last_buf->dma_wait, &entry);
                return retcode;
        }

        i810_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", current->pid);
                current->state = TASK_INTERRUPTIBLE;
                for (;;) {
                        if (!last_buf->waiting
                            && !last_buf->pending)
                                break; /* finished */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -EINTR; /* Can't restart */
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                DRM_DEBUG("%d running\n", current->pid);
                remove_wait_queue(&last_buf->dma_wait, &entry);
                if (!retcode
                    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
                        if (!waitqueue_active(&last_buf->dma_wait)) {
                                drm_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  last_buf->pid,
                                  current->pid);
                }
        }
        return retcode;
}

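/* ioctl handler for DRM_IOCTL_DMA: validates the request, sends any
 * buffers named in send_indices[], then tries to reserve request_count
 * fresh buffers for the caller.  In this skeleton every send goes
 * through i810_dma_priority(); the queued path below is compiled out.
 *
 * A userspace caller would reach this with something like (hypothetical
 * sketch, not part of this file):
 *
 *      drm_dma_t d;
 *      memset(&d, 0, sizeof(d));
 *      d.context       = ctx;         // a non-kernel context handle
 *      d.send_count    = 1;
 *      d.send_indices  = &idx;        // index of a previously mapped buffer
 *      d.send_sizes    = &used;       // bytes actually filled in
 *      d.request_count = 0;           // no new buffers wanted
 *      ioctl(fd, DRM_IOCTL_DMA, &d);
 */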
int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
             unsigned long arg)
{
        drm_file_t        *priv     = filp->private_data;
        drm_device_t      *dev      = priv->dev;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         d;

        printk(KERN_INFO "i810_dma start\n");
        copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
        DRM_DEBUG("%d %d: %d send, %d req\n",
                  current->pid, d.context, d.send_count, d.request_count);

        if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
                DRM_ERROR("Process %d using context %d\n",
                          current->pid, d.context);
                return -EINVAL;
        }

        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          current->pid, d.send_count, dma->buf_count);
                return -EINVAL;
        }
        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.send_count) {
#if 0
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = i810_dma_priority(dev, &d);
                else
                        retcode = i810_dma_send_buffers(dev, &d);
#endif
                printk(KERN_INFO "i810_dma priority\n");

                retcode = i810_dma_priority(dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = drm_dma_get_buffers(dev, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);

        printk(KERN_INFO "i810_dma end (granted)\n");
        return retcode;
}

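/* Claim an IRQ for the device, reset the DMA bookkeeping, wire the
 * scheduler into dev->tq, and install i810_dma_service() as the
 * handler.  The hardware-side interrupt enable is still a TODO. */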
int i810_irq_install(drm_device_t *dev, int irq)
{
        int retcode;

        if (!irq)     return -EINVAL;

        down(&dev->struct_sem);
        if (dev->irq) {
                up(&dev->struct_sem);
                return -EBUSY;
        }
        dev->irq = irq;
        up(&dev->struct_sem);

        DRM_DEBUG("%d\n", irq);

        dev->context_flag     = 0;
        dev->interrupt_flag   = 0;
        dev->dma_flag         = 0;

        dev->dma->next_buffer = NULL;
        dev->dma->next_queue  = NULL;
        dev->dma->this_buffer = NULL;

        dev->tq.next          = NULL;
        dev->tq.sync          = 0;
        dev->tq.routine       = i810_dma_schedule_tq_wrapper;
        dev->tq.data          = dev;

                                /* Before installing handler */
        /* TODO */
                                /* Install handler */
        if ((retcode = request_irq(dev->irq,
                                   i810_dma_service,
                                   0,
                                   dev->devname,
                                   dev))) {
                down(&dev->struct_sem);
                dev->irq = 0;
                up(&dev->struct_sem);
                return retcode;
        }

                                /* After installing handler */
        /* TODO */
        return 0;
}

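/* Release the device's IRQ.  Interrupts should be disabled on the
 * hardware first; that step is still a TODO. */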
int i810_irq_uninstall(drm_device_t *dev)
{
        int irq;

        down(&dev->struct_sem);
        irq      = dev->irq;
        dev->irq = 0;
        up(&dev->struct_sem);

        if (!irq) return -EINVAL;

        DRM_DEBUG("%d\n", irq);

        /* TODO : Disable interrupts */
        free_irq(irq, dev);

        return 0;
}

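/* ioctl handler for DRM_IOCTL_CONTROL: installs or removes the
 * interrupt handler on behalf of the server.  Note that
 * i810_dma_init() is re-run on every call here, which only logs in
 * this skeleton. */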
int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_control_t   ctl;
        int             retcode;

        printk(KERN_INFO "i810_control\n");
        i810_dma_init(dev);

        copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);

        switch (ctl.func) {
        case DRM_INST_HANDLER:
                if ((retcode = i810_irq_install(dev, ctl.irq)))
                        return retcode;
                break;
        case DRM_UNINST_HANDLER:
                if ((retcode = i810_irq_uninstall(dev)))
                        return retcode;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

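/* ioctl handler for DRM_IOCTL_LOCK: blocks until the hardware lock can
 * be taken for the requesting context, honouring DRM_LOCK_SLICE so a
 * context that just held the lock yields under contention.  On success
 * it optionally drains DMA (_DRM_LOCK_READY) or waits for full
 * quiescence (_DRM_LOCK_QUIESCENT). */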
int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;
        drm_queue_t       *q;
#if DRM_DMA_HISTOGRAM
        cycles_t          start;

        dev->lck_start = start = get_cycles();
#endif

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        if (lock.context < 0 || lock.context >= dev->queue_count) {
                return -EINVAL;
        }
        q = dev->queuelist[lock.context];

        ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);

        if (!ret) {
                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
                    != lock.context) {
                        long j = jiffies - dev->lock.lock_time;

                        if (j > 0 && j <= DRM_LOCK_SLICE) {
                                /* Can't take lock if we just had it and
                                   there is contention. */
                                current->state = TASK_INTERRUPTIBLE;
                                schedule_timeout(j);
                        }
                }
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                atomic_inc(&q->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        current->state = TASK_INTERRUPTIBLE;
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

        drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */

        if (!ret) {
                if (lock.flags & _DRM_LOCK_READY)
                        i810_dma_ready(dev);
                if (lock.flags & _DRM_LOCK_QUIESCENT)
                        i810_dma_quiescent(dev);
        }

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}