/* mga_clear.c -- Clear, swap and iload support for mga g200/g400 -*- linux-c -*-
 *
 * Created: February 2000 by keithw@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *           Keith Whitwell <keithw@precisioninsight.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mgareg_flags.h"
#include "mga_dma.h"
#include "mga_state.h"

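/* DWGCTL value for the clears below.  A sketch of the bit meanings, as
 * far as the mgareg_flags.h names suggest: DC_opcod_trap selects the
 * trapezoid/rect-fill primitive, solid + transc give a solid fill, 0xC
 * in the bop field is the "copy source" raster op, and DC_clipdis_enable
 * turns the engine's own clipper off (the cliprects are honoured by
 * emitting one fill per box instead).
 */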
#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable |               \
                       DC_sgnzero_enable | DC_shftzero_enable |         \
                       (0xC << DC_bop_SHIFT) | DC_clipdis_enable |      \
                       DC_solid_enable | DC_transc_enable)

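/* DWGCTL value for the back-to-front blit in mgaSwapBuffers: a bitblt
 * with the "replace" atype, linear source addressing, the same 0xC copy
 * raster op, and bfcol block mode -- again with the hardware clipper
 * disabled, since the cliprects are walked by hand.
 */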
#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy |   \
                      DC_solid_disable | DC_arzero_disable |            \
                      DC_sgnzero_enable | DC_shftzero_enable |          \
                      (0xC << DC_bop_SHIFT) | DC_bltmod_bfcol |         \
                      DC_pattern_disable | DC_transc_disable |          \
                      DC_clipdis_enable)

/* Build and queue a TT_GENERAL secondary buffer to do the clears.
 * With Jeff's ringbuffer idea, it might make sense to emit straight to
 * the primary buffer when there are only one or two cliprects.
 */
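/* The DMALOCALS/DMAGETPTR/DMAOUTREG/DMAADVANCE macros (presumably from
 * mga_dma.h) implement the usual emit pattern: declare local state, map
 * a write pointer into the buffer, append (register, value) pairs, and
 * finally record how much of the buffer was used.
 */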
static int mgaClearBuffers(drm_device_t *dev,
                           int clear_color,
                           int clear_depth,
                           int flags)
{
        int cmd, i;
        drm_device_dma_t *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        xf86drmClipRectRec *pbox = sarea_priv->boxes;
        int nbox = sarea_priv->nbox;
        drm_buf_t *buf;
        drm_dma_t d;
        int order = 10;         /* XXX: log2 of the buffer size -- which
                                 * orders are actually allocated? */
        DMALOCALS;

        if (!nbox)
                return -EINVAL;

        /* Block clears are faster, but only work on SGRAM boards. */
        if ( dev_priv->sgram )
                cmd = MGA_CLEAR_CMD | DC_atype_blk;
        else
                cmd = MGA_CLEAR_CMD | DC_atype_rstr;

        buf = drm_freelist_get(&dma->bufs[order].freelist, _DRM_DMA_WAIT);
        if (!buf)
                return -EAGAIN; /* XXX: wait interrupted or pool empty */

        DMAGETPTR( buf );

        for (i = 0 ; i < nbox ; i++) {
                unsigned int height = pbox[i].y2 - pbox[i].y1;

                /* Is it necessary to be this paranoid?  I don't think so.
                if (pbox[i].x1 > dev_priv->width) continue;
                if (pbox[i].y1 > dev_priv->height) continue;
                if (pbox[i].x2 > dev_priv->width) continue;
                if (pbox[i].y2 > dev_priv->height) continue;
                if (pbox[i].x2 <= pbox[i].x1) continue;
                if (pbox[i].y2 <= pbox[i].y1) continue;
                 */

                DMAOUTREG(MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
                DMAOUTREG(MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);

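                /* YDSTLEN packs the y origin in the top half and the
                 * height in the bottom; FXBNDRY packs the right and
                 * left x bounds.  Writing DWGCTL through the
                 * MGAREG_MGA_EXEC alias appears to be what actually
                 * fires the engine, so each block below emits colour +
                 * destination and then kicks off one fill.
                 */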
                if ( flags & MGA_CLEAR_FRONT ) {
                        DMAOUTREG(MGAREG_FCOL, clear_color);
                        DMAOUTREG(MGAREG_DSTORG, dev_priv->frontOrg);
                        DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
                }

                if ( flags & MGA_CLEAR_BACK ) {
                        DMAOUTREG(MGAREG_FCOL, clear_color);
                        DMAOUTREG(MGAREG_DSTORG, dev_priv->backOrg);
                        DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
                }

                if ( flags & MGA_CLEAR_DEPTH ) {
                        DMAOUTREG(MGAREG_FCOL, clear_depth);
                        DMAOUTREG(MGAREG_DSTORG, dev_priv->depthOrg);
                        DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
                }
        }

        DMAADVANCE( buf );

        /* Make sure we restore the 3D state next time.
         */
        sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;

        ((drm_mga_buf_priv_t *)buf->dev_private)->dma_type = MGA_DMA_GENERAL;

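        /* Hand the finished buffer to the DRM core by filling in a
         * drm_dma_t as an ioctl caller would: send exactly this one
         * buffer on the kernel context, request nothing back, then
         * enqueue it and let mga_dma_schedule() push it to the card.
         */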
        d.context = DRM_KERNEL_CONTEXT;
        d.send_count = 1;
        d.send_indices = &buf->idx;
        d.send_sizes = &buf->used;
        d.flags = 0;
        d.request_count = 0;
        d.request_size = 0;
        d.request_indices = NULL;
        d.request_sizes = NULL;
        d.granted_count = 0;

        atomic_inc(&dev_priv->pending_bufs);
        if((drm_dma_enqueue(dev, &d)) != 0)
                atomic_dec(&dev_priv->pending_bufs);
        mga_dma_schedule(dev, 1);
        return 0;
}

int mgaSwapBuffers(drm_device_t *dev, int flags)
{
        drm_device_dma_t *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        xf86drmClipRectRec *pbox = sarea_priv->boxes;
        int nbox = sarea_priv->nbox;
        drm_buf_t *buf;
        drm_dma_t d;
        int order = 10;         /* XXX: same open question as above */
        int i;
        DMALOCALS;

        if (!nbox)
                return -EINVAL;

        buf = drm_freelist_get(&dma->bufs[order].freelist, _DRM_DMA_WAIT);
        if (!buf)
                return -EAGAIN; /* XXX: wait interrupted or pool empty */

        DMAGETPTR(buf);

        DMAOUTREG(MGAREG_DSTORG, dev_priv->frontOrg);
        DMAOUTREG(MGAREG_MACCESS, dev_priv->mAccess);
        DMAOUTREG(MGAREG_SRCORG, dev_priv->backOrg);
        DMAOUTREG(MGAREG_AR5, dev_priv->stride); /* unnecessary? */
        DMAOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);

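        /* The blit state is programmed once: destination is the front
         * buffer, source is the back buffer, AR5 holds the source
         * pitch.  Inside the loop only the per-cliprect addresses and
         * bounds change, and the YDSTLEN write through the MGA_EXEC
         * alias launches one copy per box.
         */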
        for (i = 0 ; i < nbox; i++) {
                unsigned int h = pbox[i].y2 - pbox[i].y1;
                unsigned int start = pbox[i].y1 * dev_priv->stride;

                /*
                if (pbox[i].x1 > dev_priv->width) continue;
                if (pbox[i].y1 > dev_priv->height) continue;
                if (pbox[i].x2 > dev_priv->width) continue;
                if (pbox[i].y2 > dev_priv->height) continue;
                if (pbox[i].x2 <= pbox[i].x1) continue;
                if (pbox[i].y2 <= pbox[i].y1) continue;
                */

                DMAOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1);
                DMAOUTREG(MGAREG_AR3, start + pbox[i].x1);
                DMAOUTREG(MGAREG_FXBNDRY, pbox[i].x1|((pbox[i].x2 - 1)<<16));
                DMAOUTREG(MGAREG_YDSTLEN+MGAREG_MGA_EXEC, (pbox[i].y1<<16)|h);
        }

        DMAOUTREG(MGAREG_SRCORG, 0);
        DMAADVANCE( buf );

        /* Make sure we restore the 3D state next time.
         */
        sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;

        ((drm_mga_buf_priv_t *)buf->dev_private)->dma_type = MGA_DMA_GENERAL;

        d.context = DRM_KERNEL_CONTEXT;
        d.send_count = 1;
        d.send_indices = &buf->idx;
        d.send_sizes = &buf->used;
        d.flags = 0;
        d.request_count = 0;
        d.request_size = 0;
        d.request_indices = NULL;
        d.request_sizes = NULL;
        d.granted_count = 0;

        atomic_inc(&dev_priv->pending_bufs);
        if((drm_dma_enqueue(dev, &d)) != 0)
                atomic_dec(&dev_priv->pending_bufs);
        mga_dma_schedule(dev, 1);
        return 0;
}


/* Queue an ILOAD (image load) buffer: the client has filled
 * buflist[idx] with texture data, and the state stashed in buf_priv
 * tells the DMA dispatcher where on the card to put it.
 */
static int mgaIload(drm_device_t *dev, drm_mga_iload_t *args)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf = dma->buflist[ args->idx ];
        drm_mga_buf_priv_t *buf_priv = (drm_mga_buf_priv_t *)buf->dev_private;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_dma_t d;
        int pixperdword;

        buf_priv->dma_type = MGA_DMA_ILOAD;
        buf_priv->boxes[0].y1 = args->texture.y1;
        buf_priv->boxes[0].y2 = args->texture.y2;
        buf_priv->boxes[0].x1 = args->texture.x1;
        buf_priv->boxes[0].x2 = args->texture.x2;
        buf_priv->ContextState[MGA_CTXREG_DSTORG] = args->destOrg;
        buf_priv->ContextState[MGA_CTXREG_MACCESS] = args->mAccess;
        buf_priv->ServerState[MGA_2DREG_PITCH] = args->pitch;
        buf_priv->nbox = 1;
        sarea_priv->dirty |= (MGASAREA_NEW_CONTEXT | MGASAREA_NEW_2D);
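        /* MACCESS[1:0] selects the pixel width: 0 gives 4 pixels per
         * dword (8bpp), 1 gives 2 (16bpp), 2 gives 1 (32bpp); 3 is
         * apparently 24bpp, which isn't a whole number of pixels per
         * dword and so is rejected.  The value converts the texture
         * area from pixels to the dword count the engine will transfer.
         */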
        switch((args->mAccess & 0x00000003)) {
        case 0:
                pixperdword = 4;
                break;
        case 1:
                pixperdword = 2;
                break;
        case 2:
                pixperdword = 1;
                break;
        default:
                DRM_ERROR("Invalid maccess value passed"
                          " to mgaIload\n");
                return -EINVAL;
        }
        buf->used = ((args->texture.y2 - args->texture.y1) *
                     (args->texture.x2 - args->texture.x1) /
                     pixperdword);
        DRM_DEBUG("buf->used : %d\n", buf->used);
        d.context = DRM_KERNEL_CONTEXT;
        d.send_count = 1;
        d.send_indices = &buf->idx;
        d.send_sizes = &buf->used;
        d.flags = 0;
        d.request_count = 0;
        d.request_size = 0;
        d.request_indices = NULL;
        d.request_sizes = NULL;
        d.granted_count = 0;

        atomic_inc(&dev_priv->pending_bufs);
        if((drm_dma_enqueue(dev, &d)) != 0)
                atomic_dec(&dev_priv->pending_bufs);
        mga_dma_schedule(dev, 1);

        return 0;
}


/* Necessary?  Not necessary??
 */
static int check_lock(void)
{
        return 1;
}

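/* The ioctl entry points below all follow the same pattern: copy the
 * argument block in from user space (copy_from_user_ret bails out with
 * -EFAULT on a bad pointer) and hand off to the worker above.  The
 * check_lock() calls are stubbed out for now.
 */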
int mga_clear_bufs(struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_mga_clear_t clear;
        int retcode;

        copy_from_user_ret(&clear, (drm_mga_clear_t *)arg,
                           sizeof(clear), -EFAULT);

/*      if (!check_lock( dev )) */
/*              return -EIEIO; */

        retcode = mgaClearBuffers(dev, clear.clear_color,
                                  clear.clear_depth,
                                  clear.flags);

        return retcode;
}

int mga_swap_bufs(struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_mga_swap_t swap;
        int retcode = 0;

/*      if (!check_lock( dev )) */
/*              return -EIEIO; */

        copy_from_user_ret(&swap, (drm_mga_swap_t *)arg,
                           sizeof(swap), -EFAULT);

        retcode = mgaSwapBuffers(dev, swap.flags);

        return retcode;
}

int mga_iload(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_mga_iload_t iload;
        int retcode = 0;

/*      if (!check_lock( dev )) */
/*              return -EIEIO; */

        copy_from_user_ret(&iload, (drm_mga_iload_t *)arg,
                           sizeof(iload), -EFAULT);

        retcode = mgaIload(dev, &iload);

        return retcode;
}


int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
            unsigned long arg)
{
        drm_file_t        *priv     = filp->private_data;
        drm_device_t      *dev      = priv->dev;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         d;

        copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
        DRM_DEBUG("%d %d: %d send, %d req\n",
                  current->pid, d.context, d.send_count, d.request_count);

        /* Per-context queues are unworkable if you are trying to do
         * state management from the client.
         */
        d.context = DRM_KERNEL_CONTEXT;
        d.flags &= ~_DRM_DMA_WHILE_LOCKED;

        /* Maybe multiple buffers would be useful for iload...
         * But this ioctl is only for *dispatching* vertex data.
         */
        if (d.send_count < 0 || d.send_count > 1) {
                DRM_ERROR("Process %d trying to send %d buffers (max 1)\n",
                          current->pid, d.send_count);
                return -EINVAL;
        }

        /* But it *is* used to request buffers for all types of dma:
         */
        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

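        /* From here on the ioctl does up to two independent jobs: send
         * at most one vertex buffer to the hardware, and/or hand free
         * buffers back to the client.  granted_count reports how many
         * requests were satisfied when the struct is copied back out.
         */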
        if (d.send_count) {
                int idx = d.send_indices[0];
                drm_mga_buf_priv_t *buf_priv = dma->buflist[ idx ]->dev_private;
                drm_mga_private_t *dev_priv = dev->dev_private;

                buf_priv->dma_type = MGA_DMA_VERTEX;

/*              if (!check_lock( dev )) */
/*                      return -EIEIO; */

                /* Snapshot the relevant bits of the sarea...
                 */
                mgaCopyAndVerifyState( dev_priv, buf_priv );

                atomic_inc(&dev_priv->pending_bufs);
                retcode = drm_dma_enqueue(dev, &d);
                if(retcode != 0)
                        atomic_dec(&dev_priv->pending_bufs);
                mga_dma_schedule(dev, 1);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = drm_dma_get_buffers(dev, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);

        return retcode;
}