/* mga_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *          Jeff Hartmann <jhartmann@precisioninsight.com>
 *
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_bufs.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mga_dma.h"
#include "linux/un.h"

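/* mga_addbufs_agp -- back the buffer-allocation (addbufs) ioctl with AGP
 * memory.  Carves request.count buffers of 2^order bytes out of the AGP
 * aperture region starting at request.agp_start, threads them onto the
 * per-order freelist, and reports the actual count and size back to user
 * space.  No pages are allocated here; the aperture is assumed to be
 * already bound.
 */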
int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
   drm_file_t *priv = filp->private_data;
   drm_device_t *dev = priv->dev;
   drm_device_dma_t *dma = dev->dma;
   drm_buf_desc_t request;
   drm_buf_entry_t *entry;
   drm_buf_t *buf;
   unsigned long offset;
   unsigned long agp_offset;
   int count;
   int order;
   int size;
   int alignment;
   int page_order;
   int total;
   int byte_count;
   int i;

   if (!dma) return -EINVAL;

   copy_from_user_ret(&request,
                      (drm_buf_desc_t *)arg,
                      sizeof(request),
                      -EFAULT);

   count = request.count;
   order = drm_order(request.size);
   size = 1 << order;
   agp_offset = request.agp_start;
   alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
   page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
   total = PAGE_SIZE << page_order;
   byte_count = 0;

   DRM_DEBUG("count: %d\n", count);
   DRM_DEBUG("order: %d\n", order);
   DRM_DEBUG("size: %d\n", size);
   DRM_DEBUG("agp_offset: %lu\n", agp_offset);
   DRM_DEBUG("alignment: %d\n", alignment);
   DRM_DEBUG("page_order: %d\n", page_order);
   DRM_DEBUG("total: %d\n", total);
   DRM_DEBUG("byte_count: %d\n", byte_count);

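   /* Refuse while queues exist or buffers are already mapped: buf_use is
    * bumped by the first infobufs/mapbufs call and blocks all further
    * allocation, while buf_alloc marks an allocation in progress under
    * count_lock.
    */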
   if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
   if (dev->queue_count) return -EBUSY; /* Not while in use */
   spin_lock(&dev->count_lock);
   if (dev->buf_use) {
      spin_unlock(&dev->count_lock);
      return -EBUSY;
   }
   atomic_inc(&dev->buf_alloc);
   spin_unlock(&dev->count_lock);

   down(&dev->struct_sem);
   entry = &dma->bufs[order];
   if (entry->buf_count) {
      up(&dev->struct_sem);
      atomic_dec(&dev->buf_alloc);
      return -ENOMEM; /* May only call once for each order */
   }

   entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                              DRM_MEM_BUFS);
   if (!entry->buflist) {
      up(&dev->struct_sem);
      atomic_dec(&dev->buf_alloc);
      return -ENOMEM;
   }
   memset(entry->buflist, 0, count * sizeof(*entry->buflist));

   entry->buf_size   = size;
   entry->page_order = page_order;
   offset = 0;

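   /* Each buffer sits at a fixed offset within the AGP aperture; both the
    * bus address and the kernel-visible address are derived from
    * dev->agp->base + agp_offset, alignment bytes apart.
    */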
   while (entry->buf_count < count) {
      buf = &entry->buflist[entry->buf_count];
      buf->idx = dma->buf_count + entry->buf_count;
      buf->total = alignment;
      buf->order = order;
      buf->used = 0;

      DRM_DEBUG("offset : %lu\n", offset);

      buf->offset = offset; /* Hrm */
      buf->bus_address = dev->agp->base + agp_offset + offset;
      buf->address = (void *)(agp_offset + offset + dev->agp->base);
      buf->next = NULL;
      buf->waiting = 0;
      buf->pending = 0;
      init_waitqueue_head(&buf->dma_wait);
      buf->pid = 0;

      buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
      buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);

#if DRM_DMA_HISTOGRAM
      buf->time_queued = 0;
      buf->time_dispatched = 0;
      buf->time_completed = 0;
      buf->time_freed = 0;
#endif
      offset = offset + alignment;
      entry->buf_count++;
      byte_count += PAGE_SIZE << page_order;

      DRM_DEBUG("buffer %d @ %p\n",
                entry->buf_count, buf->address);
   }

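   /* Publish the new entries on the device-wide buffer list and hand
    * every buffer to the per-order freelist.
    */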
   dma->buflist = drm_realloc(dma->buflist,
                              dma->buf_count * sizeof(*dma->buflist),
                              (dma->buf_count + entry->buf_count)
                              * sizeof(*dma->buflist),
                              DRM_MEM_BUFS);
   for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
      dma->buflist[i] = &entry->buflist[i - dma->buf_count];

   dma->buf_count  += entry->buf_count;

   DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);

   dma->byte_count += byte_count;

   DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

   drm_freelist_create(&entry->freelist, entry->buf_count);
   for (i = 0; i < entry->buf_count; i++) {
      drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
   }

   up(&dev->struct_sem);

   request.count = entry->buf_count;
   request.size  = size;

   copy_to_user_ret((drm_buf_desc_t *)arg,
                    &request,
                    sizeof(request),
                    -EFAULT);

   atomic_dec(&dev->buf_alloc);

   DRM_DEBUG("count: %d\n", count);
   DRM_DEBUG("order: %d\n", order);
   DRM_DEBUG("size: %d\n", size);
   DRM_DEBUG("agp_offset: %lu\n", agp_offset);
   DRM_DEBUG("alignment: %d\n", alignment);
   DRM_DEBUG("page_order: %d\n", page_order);
   DRM_DEBUG("total: %d\n", total);
   DRM_DEBUG("byte_count: %d\n", byte_count);

   dma->flags = _DRM_DMA_USE_AGP;

   DRM_DEBUG("dma->flags : %lx\n", dma->flags);

   return 0;
}

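/* mga_addbufs_pci -- back the addbufs ioctl with PCI (system) memory.
 * Unlike the AGP path, this allocates real pages with drm_alloc_pages()
 * and may place several sub-page buffers inside each allocation.
 */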
int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_desc_t   request;
        int              count;
        int              order;
        int              size;
        int              total;
        int              page_order;
        drm_buf_entry_t  *entry;
        unsigned long    page;
        drm_buf_t        *buf;
        int              alignment;
        unsigned long    offset;
        int              i;
        int              byte_count;
        int              page_count;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        count      = request.count;
        order      = drm_order(request.size);
        size       = 1 << order;

        DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
                  request.count, request.size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */

        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist),
                         DRM_MEM_BUFS);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        dma->pagelist = drm_realloc(dma->pagelist,
                                    dma->page_count * sizeof(*dma->pagelist),
                                    (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES);
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size   = size;
        entry->page_order = page_order;
        byte_count        = 0;
        page_count        = 0;
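        /* Allocate segments of 2^page_order pages and slice each one into
         * as many aligned buffers as fit; stop early (and report a short
         * count to the caller) if drm_alloc_pages() fails.
         */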
        while (entry->buf_count < count) {
                if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
                entry->seglist[entry->seg_count++] = page;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  page + PAGE_SIZE * i);
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->seg_count  += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        copy_to_user_ret((drm_buf_desc_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        atomic_dec(&dev->buf_alloc);
        return 0;
}

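/* mga_addbufs -- dispatch the addbufs ioctl to the AGP or PCI path
 * according to the _DRM_AGP_BUFFER flag in the request.
 */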
int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
   drm_buf_desc_t request;

   copy_from_user_ret(&request,
                      (drm_buf_desc_t *)arg,
                      sizeof(request),
                      -EFAULT);

   if (request.flags & _DRM_AGP_BUFFER)
      return mga_addbufs_agp(inode, filp, cmd, arg);
   else
      return mga_addbufs_pci(inode, filp, cmd, arg);
}

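/* mga_infobufs -- report the per-order buffer counts, sizes, and
 * freelist watermarks back to user space.  As a side effect this bumps
 * dev->buf_use, which blocks any further addbufs calls.
 */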
int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_info_t   request;
        int              i;
        int              count;

        if (!dma) return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_info_t *)arg,
                           sizeof(request),
                           -EFAULT);

        for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                if (dma->bufs[i].buf_count) ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                        if (dma->bufs[i].buf_count) {
                                copy_to_user_ret(&request.list[count].count,
                                                 &dma->bufs[i].buf_count,
                                                 sizeof(dma->bufs[0].buf_count),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].size,
                                                 &dma->bufs[i].buf_size,
                                                 sizeof(dma->bufs[0].buf_size),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].low_mark,
                                                 &dma->bufs[i].freelist.low_mark,
                                                 sizeof(dma->bufs[0].freelist.low_mark),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].high_mark,
                                                 &dma->bufs[i].freelist.high_mark,
                                                 sizeof(dma->bufs[0].freelist.high_mark),
                                                 -EFAULT);
                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        copy_to_user_ret((drm_buf_info_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        return 0;
}

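/* mga_markbufs -- set the freelist low/high watermarks for the buffer
 * size given in the request.
 */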
int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_desc_t   request;
        int              order;
        drm_buf_entry_t  *entry;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

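/* mga_freebufs -- return a list of buffers to the freelist.  Only the
 * process that currently owns a buffer may free it.
 */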
int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_free_t   request;
        int              i;
        int              idx;
        drm_buf_t        *buf;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_free_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                copy_from_user_ret(&idx,
                                   &request.list[i],
                                   sizeof(idx),
                                   -EFAULT);
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d freeing buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}

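/* mga_mapbufs -- map all DMA buffers into the client's address space
 * with a single mmap of either the AGP buffer map or the PCI DMA area,
 * then fill in the per-buffer index/size/address table in the request.
 */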
int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        int              retcode = 0;
        const int        zero    = 0;
        unsigned long    virtual;
        unsigned long    address;
        drm_buf_map_t    request;
        int              i;

        if (!dma) return -EINVAL;

        DRM_DEBUG("\n");

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                DRM_DEBUG("Busy\n");
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_map_t *)arg,
                           sizeof(request),
                           -EFAULT);

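        /* For AGP, mmap the buffer map registered by the driver (looked
         * up via dev_priv->buffer_map_idx); for PCI, offset 0 maps the
         * DMA area.  A return value in the top 1024 values of the
         * address space is a negative errno from do_mmap().
         */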
        DRM_DEBUG("mga_mapbufs\n");
        DRM_DEBUG("dma->flags : %lx\n", dma->flags);

        if (request.count >= dma->buf_count) {
                if (dma->flags & _DRM_DMA_USE_AGP) {
                        drm_mga_private_t *dev_priv = dev->dev_private;
                        drm_map_t *map = NULL;

                        map = dev->maplist[dev_priv->buffer_map_idx];
                        if (!map) {
                                DRM_DEBUG("map is null\n");
                                retcode = -EINVAL;
                                goto done;
                        }

                        DRM_DEBUG("map->offset : %lx\n", map->offset);
                        DRM_DEBUG("map->size : %lx\n", map->size);
                        DRM_DEBUG("map->type : %d\n", map->type);
                        DRM_DEBUG("map->flags : %x\n", map->flags);
                        DRM_DEBUG("map->handle : %lx\n", map->handle);
                        DRM_DEBUG("map->mtrr : %d\n", map->mtrr);

                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ|PROT_WRITE, MAP_SHARED,
                                          (unsigned long)map->offset);
                } else {
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        DRM_DEBUG("mmap error\n");
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero,
                                         sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;
                        if (copy_to_user(&request.list[i].address,
                                         &address,
                                         sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        copy_to_user_ret((drm_buf_map_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        DRM_DEBUG("retcode : %d\n", retcode);

        return retcode;
}