/* Import of XFree86 3.9.18
 * [gstreamer-omap:libdrm.git] / linux / mga_dma.c
 */
1 /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
2  * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3  *
4  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11  * and/or sell copies of the Software, and to permit persons to whom the
12  * Software is furnished to do so, subject to the following conditions:
13  * 
14  * The above copyright notice and this permission notice (including the next
15  * paragraph) shall be included in all copies or substantial portions of the
16  * Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
21  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24  * DEALINGS IN THE SOFTWARE.
25  *
26  * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
27  *          Jeff Hartmann <jhartmann@precisioninsight.com>
28  *
29  * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_dma.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
30  *
31  */
32
33 #define __NO_VERSION__
34 #include "drmP.h"
35 #include "mga_drv.h"
36 #include "mgareg_flags.h"
37 #include "mga_dma.h"
38 #include "mga_state.h"
39
40 #include <linux/interrupt.h>    /* For task queue support */
41
/* Register access helpers.
 *
 * MGA_REG() selects the maplist entry holding the MMIO register
 * aperture (index 2 in the current map layout); MGA_BASE() resolves it
 * to the kernel virtual address the map was remapped to.  All of these
 * macros expect a variable named `dev` (drm_device_t *) to be in scope
 * at the expansion site.
 *
 * Fixed macro hygiene: `reg` in MGA_ADDR and `val` in MGA_WRITE are now
 * parenthesized so expression arguments (e.g. `base + off`) expand
 * correctly.
 */
#define MGA_REG(reg)            2
#define MGA_BASE(reg)           ((unsigned long) \
                                ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
#define MGA_ADDR(reg)           (MGA_BASE(reg) + (reg))
#define MGA_DEREF(reg)          *(__volatile__ int *)MGA_ADDR(reg)
#define MGA_READ(reg)           MGA_DEREF(reg)
#define MGA_WRITE(reg,val)      do { MGA_DEREF(reg) = (val); } while (0)

/* AGP transfer enable bit OR'd into the PRIMEND/SECEND DMA registers. */
#define PDEA_pagpxfer_enable         0x2
/* Magic tag written to DWGSYNC to detect engine sync transitions. */
#define MGA_SYNC_TAG                 0x423f4200
52
/* Matrox DMA transfer types, OR'd into the low bits of the
 * PRIMADDRESS/SECADDRESS registers to tell the engine how to interpret
 * the buffer being transferred.
 */
typedef enum {
        TT_GENERAL = 0,         /* general register/command stream */
        TT_BLIT    = 1,         /* blit (image load) data */
        TT_VECTOR  = 2,         /* vector data */
        TT_VERTEX  = 3          /* vertex data */
} transferType_t;
59
60
/* Deliberate no-op used as a spin-delay body in the poll loops below. */
static void mga_delay(void)
{
}
65
/* Tear down the driver-private DMA state created by
 * mga_dma_initialize(): release the remapped primary/warp region and
 * free the private structure.  A no-op when no private state exists,
 * so it doubles as the error-unwind path.  Always returns 0.
 */
int mga_dma_cleanup(drm_device_t *dev)
{
        if(dev->dev_private) {
                drm_mga_private_t *dev_priv = 
                        (drm_mga_private_t *) dev->dev_private;
      
                if(dev_priv->ioremap) {
                        /* Round the mapped size up to whole pages,
                         * mirroring the size computed at map time in
                         * mga_dma_initialize().
                         */
                        int temp = (dev_priv->warp_ucode_size + 
                                    dev_priv->primary_size + 
                                    PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;

                        drm_ioremapfree((void *) dev_priv->ioremap, temp);
                }

                drm_free(dev->dev_private, sizeof(drm_mga_private_t), 
                         DRM_MEM_DRIVER);
                dev->dev_private = NULL;
        }

        return 0;
}
87
/* Ensure the kernel context (DRM_KERNEL_CONTEXT) has a command queue,
 * creating the single-entry queuelist and queue on first use, then
 * (re)initialize the queue's counters, wait queues and waitlist.
 * Returns DRM_KERNEL_CONTEXT on success or -ENOMEM on allocation
 * failure.  Serialized by dev->struct_sem.
 */
static int mga_alloc_kernel_queue(drm_device_t *dev)
{
        drm_queue_t *queue = NULL;
                                /* Allocate a new queue */
        down(&dev->struct_sem);
        
        if(dev->queue_count != 0) {
           /* Reseting the kernel context here is not
            * a race, since it can only happen when that
            * queue is empty.
            */
           queue = dev->queuelist[DRM_KERNEL_CONTEXT];
           printk("Kernel queue already allocated\n");
        } else {
           queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
           if(!queue) {
              up(&dev->struct_sem);
              printk("out of memory\n");
              return -ENOMEM;
           }
           ++dev->queue_count;
           /* NOTE(review): only room for one queue pointer is
            * allocated here, and if this allocation fails the
            * queue_count increment above is never undone -- verify
            * intended accounting against the drm core.
            */
           dev->queuelist = drm_alloc(sizeof(*dev->queuelist), 
                                      DRM_MEM_QUEUES);
           if(!dev->queuelist) {
              up(&dev->struct_sem);
              drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES);
              printk("out of memory\n");
              return -ENOMEM;
           }  
        }
           
        /* Reset the queue whether freshly allocated or reused. */
        memset(queue, 0, sizeof(*queue));
        atomic_set(&queue->use_count, 1);
        atomic_set(&queue->finalization,  0);
        atomic_set(&queue->block_count,   0);
        atomic_set(&queue->block_read,    0);
        atomic_set(&queue->block_write,   0);
        atomic_set(&queue->total_queued,  0);
        atomic_set(&queue->total_flushed, 0);
        atomic_set(&queue->total_locks,   0);

        init_waitqueue_head(&queue->write_queue);
        init_waitqueue_head(&queue->read_queue);
        init_waitqueue_head(&queue->flush_queue);

        queue->flags = 0;

        /* Waitlist sized to the number of DMA buffers available. */
        drm_waitlist_create(&queue->waitlist, dev->dma->buf_count);
   
        dev->queue_slots = 1;
        dev->queuelist[DRM_KERNEL_CONTEXT] = queue;
        /* NOTE(review): this decrement undoes the increment on the
         * fresh-allocation path, and on the reuse path drops the count
         * below its entry value -- confirm this is intentional.
         */
        dev->queue_count--;
        
        up(&dev->struct_sem);
        printk("%d (new)\n", dev->queue_count - 1);
        return DRM_KERNEL_CONTEXT;
}
145
146 static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
147         drm_mga_private_t *dev_priv;
148         drm_map_t *prim_map = NULL;
149         drm_map_t *sarea_map = NULL;
150         int temp;
151
152
153         dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
154         if(dev_priv == NULL) return -ENOMEM;
155         dev->dev_private = (void *) dev_priv;
156
157         printk("dev_private\n");
158
159         memset(dev_priv, 0, sizeof(drm_mga_private_t));
160         atomic_set(&dev_priv->pending_bufs, 0);
161
162         if((init->reserved_map_idx >= dev->map_count) ||
163            (init->buffer_map_idx >= dev->map_count)) {
164                 mga_dma_cleanup(dev);
165                 printk("reserved_map or buffer_map are invalid\n");
166                 return -EINVAL;
167         }
168    
169         if(mga_alloc_kernel_queue(dev) != DRM_KERNEL_CONTEXT) {
170            mga_dma_cleanup(dev);
171            DRM_ERROR("Kernel context queue not present\n");
172         }
173
174         dev_priv->reserved_map_idx = init->reserved_map_idx;
175         dev_priv->buffer_map_idx = init->buffer_map_idx;
176         sarea_map = dev->maplist[0];
177         dev_priv->sarea_priv = (drm_mga_sarea_t *) 
178                 ((u8 *)sarea_map->handle + 
179                  init->sarea_priv_offset);
180         printk("sarea_priv\n");
181
182         /* Scale primary size to the next page */
183         dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) / 
184                                   PAGE_SIZE) * PAGE_SIZE;
185         dev_priv->warp_ucode_size = init->warp_ucode_size;
186         dev_priv->chipset = init->chipset;
187         dev_priv->fbOffset = init->fbOffset;
188         dev_priv->backOffset = init->backOffset;
189         dev_priv->depthOffset = init->depthOffset;
190         dev_priv->textureOffset = init->textureOffset;
191         dev_priv->textureSize = init->textureSize;
192         dev_priv->cpp = init->cpp;
193         dev_priv->sgram = init->sgram;
194         dev_priv->stride = init->stride;
195
196         dev_priv->frontOrg = init->frontOrg;
197         dev_priv->backOrg = init->backOrg;
198         dev_priv->depthOrg = init->depthOrg;
199         dev_priv->mAccess = init->mAccess;
200         
201    
202         printk("memcpy\n");
203         memcpy(&dev_priv->WarpIndex, &init->WarpIndex, 
204                sizeof(mgaWarpIndex) * MGA_MAX_WARP_PIPES);
205         printk("memcpy done\n");
206         prim_map = dev->maplist[init->reserved_map_idx];
207         dev_priv->prim_phys_head = dev->agp->base + init->reserved_map_agpstart;
208         temp = init->warp_ucode_size + dev_priv->primary_size;
209         temp = ((temp + PAGE_SIZE - 1) / 
210                 PAGE_SIZE) * PAGE_SIZE;
211         printk("temp : %x\n", temp);
212         printk("dev->agp->base: %lx\n", dev->agp->base);
213         printk("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart);
214
215
216         dev_priv->ioremap = drm_ioremap(dev->agp->base + init->reserved_map_agpstart, 
217                                         temp);
218         if(dev_priv->ioremap == NULL) {
219                 printk("Ioremap failed\n");
220                 mga_dma_cleanup(dev);
221                 return -ENOMEM;
222         }
223
224
225
226         dev_priv->prim_head = (u32 *)dev_priv->ioremap;
227         printk("dev_priv->prim_head : %p\n", dev_priv->prim_head);
228         dev_priv->current_dma_ptr = dev_priv->prim_head;
229         dev_priv->prim_num_dwords = 0;
230         dev_priv->prim_max_dwords = dev_priv->primary_size / 4;
231    
232         printk("dma initialization\n");
233
234         /* Private is now filled in, initialize the hardware */
235         {
236                 PRIMLOCALS;
237                 PRIMRESET( dev_priv );
238                 PRIMGETPTR( dev_priv );
239                 PRIMOUTREG(MGAREG_DMAPAD, 0);
240                 PRIMOUTREG(MGAREG_DMAPAD, 0);
241                 PRIMOUTREG(MGAREG_DWGSYNC, 0);
242                 PRIMOUTREG(MGAREG_SOFTRAP, 0);
243                 PRIMADVANCE( dev_priv );
244
245                 /* Poll for the first buffer to insure that
246                  * the status register will be correct
247                  */
248                 printk("phys_head : %lx\n", phys_head);
249    
250                 MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
251
252                 while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) {
253                         int i;
254                         for(i = 0 ; i < 4096; i++) mga_delay();
255                 }
256
257                 MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
258
259                 MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) | 
260                                            PDEA_pagpxfer_enable));
261
262                 while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) {
263                         int i;
264                         for(i = 0; i < 4096; i++) mga_delay();
265                 }
266
267         }
268    
269         printk("dma init was successful\n");
270         return 0;
271 }
272
273 int mga_dma_init(struct inode *inode, struct file *filp,
274                  unsigned int cmd, unsigned long arg)
275 {
276         drm_file_t *priv = filp->private_data;
277         drm_device_t *dev = priv->dev;
278         drm_mga_init_t init;
279    
280         copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT);
281    
282         switch(init.func) {
283         case MGA_INIT_DMA:
284                 return mga_dma_initialize(dev, &init);
285         case MGA_CLEANUP_DMA:
286                 return mga_dma_cleanup(dev);
287         }
288
289         return -EINVAL;
290 }
291
/* Drawing-engine command word for an image load (ILOAD): linear
 * source, replace atype, block-color mode, boolean op 0xC
 * (presumably source copy -- confirm against the MGA register spec),
 * sign/shift handling zeroed and clipping disabled.
 */
#define MGA_ILOAD_CMD (DC_opcod_iload | DC_atype_rpl |                  \
                       DC_linear_linear | DC_bltmod_bfcol |             \
                       (0xC << DC_bop_SHIFT) | DC_sgnzero_enable |      \
                       DC_shftzero_enable | DC_clipdis_enable)
296
/* Emit an image-load for a narrow rectangle (first cliprect only) as a
 * single 1-pixel-high linear transfer: the destination is treated as a
 * (x2-x1)*(y2-y1)-wide line, avoiding per-scanline setup.  Builds the
 * register writes into the primary DMA buffer, then starts the primary
 * transfer and leaves the SOFTRAP to signal completion via IRQ.
 * Register emission order is hardware-significant; do not reorder.
 */
static void __mga_iload_small(drm_device_t *dev,
                              drm_buf_t *buf,
                              int use_agp) 
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        /* NOTE(review): sarea_priv is only used by the #if 0 block
         * below; currently dead.
         */
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned long address = (unsigned long)buf->bus_address;
        int length = buf->used;
        int y1 = buf_priv->boxes[0].y1;
        int x1 = buf_priv->boxes[0].x1;
        int y2 = buf_priv->boxes[0].y2;
        int x2 = buf_priv->boxes[0].x2;
        int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
        int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
        PRIMLOCALS;

        PRIMRESET(dev_priv);            
        PRIMGETPTR(dev_priv);
   
        PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
        PRIMOUTREG(MGAREG_MACCESS, maccess);
        PRIMOUTREG(MGAREG_PITCH, (1 << 15));
        PRIMOUTREG(MGAREG_YDST, y1 * (x2 - x1));   
        PRIMOUTREG(MGAREG_LEN, 1);
        PRIMOUTREG(MGAREG_FXBNDRY, ((x2 - x1) * (y2 - y1) - 1) << 16);
        PRIMOUTREG(MGAREG_AR0, (x2 - x1) * (y2 - y1) - 1);
        PRIMOUTREG(MGAREG_AR3, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        /* MGA_EXEC makes the DWGCTL write kick off the blit. */
        PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        /* Secondary DMA supplies the pixel data for the ILOAD. */
        PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
        PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DWGSYNC, 0);
        PRIMOUTREG(MGAREG_SOFTRAP, 0);
        PRIMADVANCE(dev_priv);
#if 0
        /* For now we need to set this in the ioctl */
        sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
#endif
        /* Handshake on the sync tag, then start the primary DMA list. */
        MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
        while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;

        MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);   
}
348
/* Emit an image-load for the general case (first cliprect only) as an
 * XY blit: the rectangle's x bounds are aligned to a whole number of
 * texels per dword (derived from the pixel depth in MACCESS) before
 * programming YDSTLEN/FXBNDRY.  Builds the register writes into the
 * primary DMA buffer, then starts the primary transfer; the trailing
 * SOFTRAP signals completion via IRQ.  Register emission order is
 * hardware-significant; do not reorder.
 */
static void __mga_iload_xy(drm_device_t *dev,
                           drm_buf_t *buf,
                           int use_agp) 
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        /* NOTE(review): sarea_priv is only used by the #if 0 block
         * below; currently dead.
         */
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned long address = (unsigned long)buf->bus_address;
        int length = buf->used;
        int y1 = buf_priv->boxes[0].y1;
        int x1 = buf_priv->boxes[0].x1;
        int y2 = buf_priv->boxes[0].y2;
        int x2 = buf_priv->boxes[0].x2;
        int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
        int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
        int pitch = buf_priv->ServerState[MGA_2DREG_PITCH];
        int width, height;
        int texperdword = 0;
        PRIMLOCALS;
   
        width = (x2 - x1);
        height = (y2 - y1);
        /* Low two bits of MACCESS select the pixel depth; derive how
         * many texels fit in one 32-bit dword (4/2/1 for 8/16/32 bpp).
         */
        switch((maccess & 0x00000003)) {
                case 0:
                texperdword = 4;
                break;
                case 1:
                texperdword = 2;
                break;
                case 2:
                texperdword = 1;
                break;
                default:
                DRM_ERROR("Invalid maccess value passed to __mga_iload_xy\n");
                return;  
        }
   
        /* Align both x bounds up to the texel-per-dword granularity so
         * each scanline transfer is dword-aligned.
         */
        x2 = x1 + width;
        x2 = (x2 + (texperdword - 1)) & ~(texperdword - 1);
        x1 = (x1 + (texperdword - 1)) & ~(texperdword - 1);
        width = x2 - x1;
   
        PRIMRESET(dev_priv);            
        PRIMGETPTR(dev_priv);
        PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
        PRIMOUTREG(MGAREG_MACCESS, maccess);
        PRIMOUTREG(MGAREG_PITCH, pitch);
        PRIMOUTREG(MGAREG_YDSTLEN, (y1 << 16) | height);
   
        PRIMOUTREG(MGAREG_FXBNDRY, ((x1+width-1) << 16) | x1);
        PRIMOUTREG(MGAREG_AR0, width * height - 1);
        PRIMOUTREG(MGAREG_AR3, 0 );
        /* MGA_EXEC makes the DWGCTL write kick off the blit. */
        PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);
                   
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        /* Secondary DMA supplies the pixel data for the ILOAD. */
        PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
        PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);
           
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DMAPAD, 0);
        PRIMOUTREG(MGAREG_DWGSYNC, 0);
        PRIMOUTREG(MGAREG_SOFTRAP, 0);
        PRIMADVANCE(dev_priv);
#if 0
        /* For now we need to set this in the ioctl */
        sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
#endif
        /* Handshake on the sync tag, then start the primary DMA list. */
        MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
        while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;

        MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
423
424 static void mga_dma_dispatch_iload(drm_device_t *dev, drm_buf_t *buf)
425 {
426         drm_mga_buf_priv_t *buf_priv = buf->dev_private;
427
428         int use_agp = PDEA_pagpxfer_enable;
429         int x1 = buf_priv->boxes[0].x1;
430         int x2 = buf_priv->boxes[0].x2;
431    
432         if((x2 - x1) < 32) {
433                 printk("using iload small\n");
434                 __mga_iload_small(dev, buf, use_agp);
435         } else {
436                 printk("using iload xy\n");
437                 __mga_iload_xy(dev, buf, use_agp); 
438         }   
439 }
440
/* Dispatch a vertex buffer: emit the client's context state once, then
 * for each cliprect emit a secondary-DMA replay of the vertex data
 * followed by DWGSYNC/SOFTRAP markers; finally start the primary DMA
 * list and let the trailing SOFTRAP raise the completion IRQ.  With no
 * cliprects the buffer is still drawn once, unclipped.
 */
static void mga_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        unsigned long address = (unsigned long)buf->bus_address;
        int length = buf->used;
        int use_agp = PDEA_pagpxfer_enable;
        int i, count;
        PRIMLOCALS;

        PRIMRESET(dev_priv);
        
        count = buf_priv->nbox;
        if (count == 0) 
                count = 1;      /* no cliprects: draw once, unclipped */

        mgaEmitState( dev_priv, buf_priv );

        for (i = 0 ; i < count ; i++) {         
                if (i < buf_priv->nbox)
                        mgaEmitClipRect( dev_priv, &buf_priv->boxes[i] );

                /* Replay the same vertex data once per cliprect. */
                PRIMGETPTR(dev_priv);
                PRIMOUTREG( MGAREG_DMAPAD, 0);
                PRIMOUTREG( MGAREG_DMAPAD, 0);
                PRIMOUTREG( MGAREG_SECADDRESS, address | TT_VERTEX);
                PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);

                PRIMOUTREG( MGAREG_DMAPAD, 0);
                PRIMOUTREG( MGAREG_DMAPAD, 0);
                PRIMOUTREG( MGAREG_DWGSYNC, 0);
                PRIMOUTREG( MGAREG_SOFTRAP, 0);
                PRIMADVANCE(dev_priv);
        }

        PRIMGETPTR( dev_priv );

        /* Handshake on the sync tag, then start the primary DMA list. */
        MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
        while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;

        MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
484
485
/* Used internally for the small buffers generated from client state
 * information.  Emits a single secondary-DMA transfer of the buffer
 * contents into the primary list, starts the primary DMA, and relies
 * on the trailing SOFTRAP to raise the completion IRQ.
 */
static void mga_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        unsigned long address = (unsigned long)buf->bus_address;
        int length = buf->used;
        int use_agp = PDEA_pagpxfer_enable;
        PRIMLOCALS;

        PRIMRESET(dev_priv);
        PRIMGETPTR(dev_priv);

        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_SECADDRESS, address | TT_GENERAL);
        PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);

        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DWGSYNC, 0);
        PRIMOUTREG( MGAREG_SOFTRAP, 0);
        PRIMADVANCE(dev_priv);

        /* Handshake on the sync tag, then start the primary DMA list. */
        MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
        while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;

        MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
517
/* Frees dispatch lock */
/* Spin until the dispatch lock is exclusively held and the engine has
 * drained, then release the lock.  Used to quiesce the hardware before
 * operations that need it idle.
 */
static inline void mga_dma_quiescent(drm_device_t *dev)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        /* Busy-wait for sole ownership of the dispatch lock counter. */
        while(1) {
           atomic_inc(&dev_priv->dispatch_lock);
           if(atomic_read(&dev_priv->dispatch_lock) == 1) {
              break;
           } else {
              atomic_dec(&dev_priv->dispatch_lock);
           }
        }
        /* Poll STATUS until bits 0x20001 read as 0x20000 -- presumably
         * the engine-idle condition; confirm against the MGA spec.
         */
        while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) ;
#if 0
   MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
#endif
        /* Cycle DWGSYNC through the sync tag to flush pending work. */
        while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) ;
        MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
        while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
        atomic_dec(&dev_priv->dispatch_lock);
}
540
541 /* Keeps dispatch lock held */
542
543 static inline int mga_dma_is_ready(drm_device_t *dev)
544 {
545         drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
546    
547         atomic_inc(&dev_priv->dispatch_lock);
548         if(atomic_read(&dev_priv->dispatch_lock) == 1) {
549                 /* We got the lock */
550                 return 1;
551         } else {
552                 atomic_dec(&dev_priv->dispatch_lock);
553                 return 0;
554         }
555 }
556
557 static inline int mga_dma_is_ready_no_hold(drm_device_t *dev)
558 {
559         drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
560    
561         atomic_inc(&dev_priv->dispatch_lock);
562         if(atomic_read(&dev_priv->dispatch_lock) == 1) {
563                 /* We got the lock, but free it */
564                 atomic_dec(&dev_priv->dispatch_lock);
565                 return 1;
566         } else {
567                 atomic_dec(&dev_priv->dispatch_lock);
568                 return 0;
569         }
570 }
571
/* Interrupt handler -- presumably raised by the SOFTRAP at the end of
 * each dispatched primary list (confirm against the dispatch paths
 * above).  Drops the dispatch lock taken at dispatch time, acks the
 * chip's interrupt sources, frees the completed buffer and defers
 * rescheduling to the immediate bottom-half task queue.
 */
static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t     *dev = (drm_device_t *)device;
        drm_device_dma_t *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        atomic_dec(&dev_priv->dispatch_lock);           
        atomic_inc(&dev->total_irq);
        /* Ack/clear the MGA interrupt sources. */
        MGA_WRITE(MGAREG_ICLEAR, 0xfa7);

                                /* Free previous buffer */
        if (test_and_set_bit(0, &dev->dma_flag)) {
                atomic_inc(&dma->total_missed_free);
                return;
        }
        if (dma->this_buffer) {
                drm_free_buffer(dev, dma->this_buffer);
                dma->this_buffer = NULL;
        }
        clear_bit(0, &dev->dma_flag);

                                /* Dispatch new buffer */
        queue_task(&dev->tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);

}
598
/* Only called by mga_dma_schedule. */
/* Dispatch dma->next_buffer to the hardware.  Validates the buffer,
 * takes the dispatch lock (kept held until the completion IRQ frees
 * it) and the hardware lock (unless `locked` says the caller already
 * holds it), then hands the buffer to the dispatcher matching its
 * dma_type.  Returns 0 on success or a negative errno (-EBUSY when the
 * path or hardware is busy, -EINVAL for missing/reclaimed buffers).
 */
static int mga_do_dma(drm_device_t *dev, int locked)
{
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_buf_priv_t *buf_priv;

        printk("mga_do_dma\n");
        /* dma_flag serializes this path against the IRQ handler. */
        if (test_and_set_bit(0, &dev->dma_flag)) {
                atomic_inc(&dma->total_missed_dma);
                return -EBUSY;
        }
        
        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf     = dma->next_buffer;

        printk("context %d, buffer %d\n", buf->context, buf->idx);

        /* Buffers marked for reclaim are discarded, not dispatched. */
        if (buf->list == DRM_LIST_RECLAIM) {
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!buf->used) {
                DRM_ERROR("0 length buffer\n");
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }
        
        /* Takes the dispatch lock on success; the IRQ handler drops it. */
        if (mga_dma_is_ready(dev) == 0) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }
   
        /* Always hold the hardware lock while dispatching.
         */
        if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT)) {
                atomic_inc(&dma->total_missed_lock);
                clear_bit(0, &dev->dma_flag);
                atomic_dec(&dev_priv->dispatch_lock);
                return -EBUSY;
        }

        dma->next_queue  = dev->queuelist[DRM_KERNEL_CONTEXT];
        drm_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;

        buf_priv = buf->dev_private;

        printk("dispatch!\n");
        switch (buf_priv->dma_type) {
        case MGA_DMA_GENERAL:
                mga_dma_dispatch_general(dev, buf);
                break;
        case MGA_DMA_VERTEX:
                mga_dma_dispatch_vertex(dev, buf);
                break;
/*      case MGA_DMA_SETUP: */
/*              mga_dma_dispatch_setup(dev, address, length); */
/*              break; */
        case MGA_DMA_ILOAD:
                mga_dma_dispatch_iload(dev, buf);
                break;
        default:
                printk("bad buffer type %x in dispatch\n", buf_priv->dma_type);
                break;
        }
        atomic_dec(&dev_priv->pending_bufs);

        /* Retire the previously-dispatched buffer and remember this
         * one; the IRQ handler frees it after the next completion.
         * NOTE(review): this_buffer may be NULL here -- presumably
         * drm_free_buffer tolerates that; confirm in the drm core.
         */
        drm_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_add(buf->used, &dma->total_bytes);
        atomic_inc(&dma->total_dmas);

        if (!locked) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }

        clear_bit(0, &dev->dma_flag);
   
   /* Wake flushers once the last pending buffer has been dispatched. */
   if(!atomic_read(&dev_priv->pending_bufs)) {
      wake_up_interruptible(&dev->queuelist[DRM_KERNEL_CONTEXT]->flush_queue);
   }
   
#if 0
   wake_up_interruptible(&dev->lock.lock_queue);
#endif
   
        /* We hold the dispatch lock until the interrupt handler
         * frees it
         */
        return retcode;
}
710
711 static void mga_dma_schedule_timer_wrapper(unsigned long dev)
712 {
713         mga_dma_schedule((drm_device_t *)dev, 0);
714 }
715
716 static void mga_dma_schedule_tq_wrapper(void *dev)
717 {
718         mga_dma_schedule(dev, 0);
719 }
720
/* Drain the kernel-context waitlist into the hardware: pull the next
 * waiting buffer, dispatch it via mga_do_dma(), and retry (bounded by
 * `expire`) while dispatches succeed or other schedulers raced with
 * us.  Guarded by dev->interrupt_flag, so concurrent calls bail with
 * -EBUSY.  `locked` is forwarded to mga_do_dma() to indicate the
 * caller already holds the hardware lock.  Returns the last
 * mga_do_dma() status, or 0 when nothing was dispatched.
 */
int mga_dma_schedule(drm_device_t *dev, int locked)
{
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;

        printk("mga_dma_schedule\n");

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                                /* Not reentrant */
                atomic_inc(&dma->total_missed_sched);
                return -EBUSY;
        }
        /* Snapshot to detect schedulers that raced with us below. */
        missed = atomic_read(&dma->total_missed_sched);

again:
        /* There is only one queue:
         */
        if (!dma->next_buffer && DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
                q   = dev->queuelist[DRM_KERNEL_CONTEXT];
                buf = drm_waitlist_get(&q->waitlist);
                dma->next_buffer = buf;
                dma->next_queue  = q;
                /* Reclaimed buffers are dropped, not dispatched. */
                if (buf && buf->list == DRM_LIST_RECLAIM) {
                        drm_clear_next_buffer(dev);
                        drm_free_buffer(dev, buf);
                }
        }

        if (dma->next_buffer) {
                if (!(retcode = mga_do_dma(dev, locked))) 
                        ++processed;
        }

        /* Try again if we succesfully dispatched a buffer, or if someone 
         * tried to schedule while we were working.
         */
        if (--expire) {
                if (missed != atomic_read(&dma->total_missed_sched)) {
                        atomic_inc(&dma->total_lost);
                        if (mga_dma_is_ready_no_hold(dev)) 
                                goto again;
                }

                if (processed && mga_dma_is_ready_no_hold(dev)) {
                        atomic_inc(&dma->total_lost);
                        processed = 0;
                        goto again;
                }
        }
        
        clear_bit(0, &dev->interrupt_flag);
        
        return retcode;
}
780
/* Install the MGA interrupt handler on `irq`: record the irq under
 * struct_sem, reset the DMA bookkeeping, point the task-queue entry at
 * the scheduler wrapper, mask and clear the chip's interrupt sources,
 * request the irq, then enable interrupt generation (IEN bit 0).
 * Returns 0 on success, -EINVAL for irq 0, -EBUSY if a handler is
 * already installed, or the request_irq() error code.
 */
int mga_irq_install(drm_device_t *dev, int irq)
{
        int retcode;

        if (!irq)     return -EINVAL;
        
        down(&dev->struct_sem);
        if (dev->irq) {
                up(&dev->struct_sem);
                return -EBUSY;
        }
        dev->irq = irq;
        up(&dev->struct_sem);
        
        printk("install irq handler %d\n", irq);

        dev->context_flag     = 0;
        dev->interrupt_flag   = 0;
        dev->dma_flag         = 0;
        dev->dma->next_buffer = NULL;
        dev->dma->next_queue  = NULL;
        dev->dma->this_buffer = NULL;
        dev->tq.next          = NULL;
        dev->tq.sync          = 0;
        dev->tq.routine       = mga_dma_schedule_tq_wrapper;
        dev->tq.data          = dev;

                                /* Before installing handler */
        MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
        MGA_WRITE(MGAREG_IEN, 0);

                                /* Install handler */
        if ((retcode = request_irq(dev->irq,
                                   mga_dma_service,
                                   0,
                                   dev->devname,
                                   dev))) {
                /* request_irq failed: undo the irq reservation. */
                down(&dev->struct_sem);
                dev->irq = 0;
                up(&dev->struct_sem);
                return retcode;
        }

                                /* After installing handler */
        MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
        MGA_WRITE(MGAREG_IEN, 0x00000001);

        return 0;
}
830
/* Remove the MGA interrupt handler: clear dev->irq under struct_sem,
 * disable and acknowledge the chip's interrupt sources, then free the
 * irq.  Returns 0, or -EINVAL when no handler was installed.
 */
int mga_irq_uninstall(drm_device_t *dev)
{
        int irq;

        down(&dev->struct_sem);
        irq      = dev->irq;
        dev->irq = 0;
        up(&dev->struct_sem);
        
        if (!irq) return -EINVAL;
        
        printk("remove irq handler %d\n", irq);

        /* Silence the chip before releasing the irq line. */
        MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
        MGA_WRITE(MGAREG_IEN, 0);
        MGA_WRITE(MGAREG_ICLEAR, 0xfa7);

        free_irq(irq, dev);

        return 0;
}
852
853
854 int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
855                   unsigned long arg)
856 {
857         drm_file_t      *priv   = filp->private_data;
858         drm_device_t    *dev    = priv->dev;
859         drm_control_t   ctl;
860    
861         copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
862         
863         switch (ctl.func) {
864         case DRM_INST_HANDLER:
865                 return mga_irq_install(dev, ctl.irq);
866         case DRM_UNINST_HANDLER:
867                 return mga_irq_uninstall(dev);
868         default:
869                 return -EINVAL;
870         }
871 }
872
/* Drain the DMA dispatch queue: sleep until every buffer handed to the
 * hardware has been retired (dev_priv->pending_bufs reaches zero).
 *
 * Uses the classic sleep/wakeup idiom: the task is marked
 * TASK_INTERRUPTIBLE *before* it is added to flush_queue and the
 * condition is re-checked, so a wakeup arriving between the check and
 * schedule() is not lost.  mga_dma_schedule() is kicked on every pass
 * to keep the engine fed while we wait.
 *
 * NOTE(review): current->state is only set once, before the loop;
 * after the first wakeup the task is TASK_RUNNING, so subsequent
 * schedule() calls merely yield instead of sleeping -- possibly an
 * intentional busy-drain, worth confirming.
 *
 * Returns 0 once the queue is empty, or -EINTR if a signal arrived
 * first (the wait cannot be transparently restarted).
 */
int mga_flush_queue(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_queue_t       *q = dev->queuelist[DRM_KERNEL_CONTEXT];
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        int ret = 0;

        printk("mga_flush_queue\n");
        if(atomic_read(&dev_priv->pending_bufs) != 0) {
           current->state = TASK_INTERRUPTIBLE;
           add_wait_queue(&q->flush_queue, &entry);
           for (;;) {
                /* All outstanding buffers retired -- done. */
                if (!atomic_read(&dev_priv->pending_bufs)) break;
                printk("Calling schedule from flush_queue : %d\n",
                       atomic_read(&dev_priv->pending_bufs));
                mga_dma_schedule(dev, 1);
                schedule();
                if (signal_pending(current)) {
                        ret = -EINTR; /* Can't restart */
                        break;
                }
           }
           printk("Exited out of schedule from flush_queue\n");
           current->state = TASK_RUNNING;
           remove_wait_queue(&q->flush_queue, &entry);
        }

        return ret;
}
902
/* DRM_IOCTL_LOCK handler: acquire the hardware lock for a client
 * context, optionally quiescing the DMA engine first
 * (_DRM_LOCK_QUIESCENT).
 *
 * User processes may not lock the kernel context.  On contention the
 * task sleeps on lock_queue and retries; SCHED_YIELD is set so the
 * scheduler prefers other runnable tasks while we wait our turn.
 *
 * Returns 0 with the lock held, -EINVAL for a bad context,
 * -ERESTARTSYS if a signal interrupted the wait, or -EINTR if the
 * device was unregistered or the quiescent flush was interrupted (in
 * which case the just-taken lock is released again).
 */
int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        /* The kernel context is reserved for in-kernel DMA dispatch. */
        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        printk("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);


        if (lock.context < 0) {
                return -EINVAL;
        }

        /* Only one queue:
         */

        /* ret is still 0 here; the guard mirrors the stock DRM lock code. */
        if (!ret) {
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        /* Atomically try to take the hardware lock for this
                         * context. */
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        current->state = TASK_INTERRUPTIBLE;
                        current->policy |= SCHED_YIELD;
                        printk("Calling lock schedule\n");
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

        if (!ret) {
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                        printk("_DRM_LOCK_QUIESCENT\n");
                        /* Drain outstanding DMA; on failure give the lock
                         * back so the caller does not hold it on error. */
                        ret = mga_flush_queue(dev);
                        if(ret != 0) {
                           drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                         lock.context);
                        } else {
                           mga_dma_quiescent(dev);
                        }
                }
        }
        printk("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
        return ret;
}
978