/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 * Revised: Fri Aug 20 13:06:51 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/dma.c,v 1.6 1999/08/20 20:00:53 faith Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"

#include <linux/interrupt.h>    /* For task queue support */
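/* Set up the per-device DMA bookkeeping: allocate dev->dma and zero every
 * per-order buffer pool entry. */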
void drm_dma_setup(drm_device_t *dev)
{
        int i;

        dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
        memset(dev->dma, 0, sizeof(*dev->dma));
        for (i = 0; i <= DRM_MAX_ORDER; i++)
                memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}
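/* Undo drm_dma_setup() and the buffer-allocation ioctls: free the pages
 * backing each segment, the per-order buflist and seglist arrays and their
 * freelists, and finally dev->dma itself. */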
void drm_dma_takedown(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        int              i, j;

        if (!dma) return;

                                /* Clear dma buffers */
        for (i = 0; i <= DRM_MAX_ORDER; i++) {
                if (dma->bufs[i].seg_count) {
                        DRM_DEBUG("order %d: buf_count = %d,"
                                  " seg_count = %d\n",
                                  i,
                                  dma->bufs[i].buf_count,
                                  dma->bufs[i].seg_count);
                        for (j = 0; j < dma->bufs[i].seg_count; j++) {
                                drm_free_pages(dma->bufs[i].seglist[j],
                                               dma->bufs[i].page_order,
                                               DRM_MEM_DMA);
                        }
                        drm_free(dma->bufs[i].buflist,
                                 dma->bufs[i].buf_count
                                 * sizeof(*dma->bufs[0].buflist),
                                 DRM_MEM_BUFS);
                        drm_free(dma->bufs[i].seglist,
                                 dma->bufs[i].seg_count
                                 * sizeof(*dma->bufs[0].seglist),
                                 DRM_MEM_SEGS);
                        drm_freelist_destroy(&dma->bufs[i].freelist);
                }
        }

        if (dma->buflist) {
                drm_free(dma->buflist,
                         dma->buf_count * sizeof(*dma->buflist),
                         DRM_MEM_BUFS);
        }

        if (dma->pagelist) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }

        drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
        dev->dma = NULL;
}
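/* DMA timing histogram support, compiled only when DRM_DMA_HISTOGRAM is set.
 * drm_histogram_slot() maps a cycle count onto one of
 * DRM_DMA_HISTOGRAM_SLOTS buckets whose upper bounds start at
 * DRM_DMA_HISTOGRAM_INITIAL and grow via DRM_DMA_HISTOGRAM_NEXT(). */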
#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int drm_histogram_slot(unsigned long count)
{
        int value = DRM_DMA_HISTOGRAM_INITIAL;
        int slot;

        for (slot = 0;
             slot < DRM_DMA_HISTOGRAM_SLOTS;
             ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
                if (count < value) return slot;
        }
        return DRM_DMA_HISTOGRAM_SLOTS - 1;
}
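/* Record one buffer's queued->dispatched->completed->freed intervals in the
 * device histogram, then clear the buffer's timestamps for reuse. */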
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
        cycles_t queued_to_dispatched;
        cycles_t dispatched_to_completed;
        cycles_t completed_to_freed;
        int      q2d, d2c, c2f, q2c, q2f;

        if (buf->time_queued) {
                queued_to_dispatched    = (buf->time_dispatched
                                           - buf->time_queued);
                dispatched_to_completed = (buf->time_completed
                                           - buf->time_dispatched);
                completed_to_freed      = (buf->time_freed
                                           - buf->time_completed);

                q2d = drm_histogram_slot(queued_to_dispatched);
                d2c = drm_histogram_slot(dispatched_to_completed);
                c2f = drm_histogram_slot(completed_to_freed);

                q2c = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed);
                q2f = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed
                                         + completed_to_freed);

                atomic_inc(&dev->histo.total);
                atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
                atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
                atomic_inc(&dev->histo.completed_to_freed[c2f]);

                atomic_inc(&dev->histo.queued_to_completed[q2c]);
                atomic_inc(&dev->histo.queued_to_freed[q2f]);
        }
        buf->time_queued     = 0;
        buf->time_dispatched = 0;
        buf->time_completed  = 0;
        buf->time_freed      = 0;
}
#endif
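/* Return a buffer to the freelist for its order. If anyone is sleeping on
 * the buffer, wake them instead: the last waiter to wake is then responsible
 * for putting the buffer on the freelist (see the comment below). */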
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
        drm_device_dma_t *dma = dev->dma;

        if (!buf) return;

        buf->waiting = 0;
        buf->pending = 0;
        buf->pid     = 0;
        buf->used    = 0;
#if DRM_DMA_HISTOGRAM
        buf->time_completed = get_cycles();
#endif
        if (waitqueue_active(&buf->dma_wait)) {
                wake_up_interruptible(&buf->dma_wait);
        } else {
                                /* If processes are waiting, the last one
                                   to wake will put the buffer on the free
                                   list.  If no processes are waiting, we
                                   put the buffer on the freelist here. */
                drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
        }
}
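/* Reclaim every buffer owned by the given pid (typically when a client
 * exits): idle buffers are freed at once, buffers still on a waitlist are
 * marked DRM_LIST_RECLAIM, and buffers already on hardware are left alone. */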
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
        drm_device_dma_t *dma = dev->dma;
        int              i;

        for (i = 0; i < dma->buf_count; i++) {
                if (dma->buflist[i]->pid == pid) {
                        switch (dma->buflist[i]->list) {
                        case DRM_LIST_NONE:
                                drm_free_buffer(dev, dma->buflist[i]);
                                break;
                        case DRM_LIST_WAIT:
                                dma->buflist[i]->list = DRM_LIST_RECLAIM;
                                break;
                        default:
                                /* Buffer already on hardware. */
                                break;
                        }
                }
        }
}
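/* Start a switch from hardware context 'old' to 'new'. With DRM_FLAG_NOCTX
 * the switch is completed immediately in the kernel; otherwise a "C old new"
 * request is handed to the server via drm_write_string() and the switch is
 * finished later by drm_context_switch_complete(). */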
int drm_context_switch(drm_device_t *dev, int old, int new)
{
        char        buf[64];
        drm_queue_t *q;

        atomic_inc(&dev->total_ctx);

        if (test_and_set_bit(0, &dev->context_flag)) {
                DRM_ERROR("Reentering -- FIXME\n");
                return -EBUSY;
        }

#if DRM_DMA_HISTOGRAM
        dev->ctx_start = get_cycles();
#endif

        DRM_DEBUG("Context switch from %d to %d\n", old, new);

        if (new >= dev->queue_count) {
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (new == dev->last_context) {
                clear_bit(0, &dev->context_flag);
                return 0;
        }

        q = dev->queuelist[new];
        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                atomic_dec(&q->use_count);
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (drm_flags & DRM_FLAG_NOCTX) {
                drm_context_switch_complete(dev, new);
        } else {
                sprintf(buf, "C %d %d\n", old, new);
                drm_write_string(dev, buf);
        }

        atomic_dec(&q->use_count);

        return 0;
}
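/* Finish a context switch: record the new context and the switch time, drop
 * the hardware lock unless the next buffer must be sent while locked, and
 * wake anything sleeping on context_wait. */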
int drm_context_switch_complete(drm_device_t *dev, int new)
{
        drm_device_dma_t *dma = dev->dma;

        dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
        dev->last_switch  = jiffies;

        if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
        }

        if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("Cannot free lock\n");
                }
        }

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
                                                      - dev->ctx_start)]);
#endif
        clear_bit(0, &dev->context_flag);
        wake_up_interruptible(&dev->context_wait);

        return 0;
}
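/* Drop the cached next buffer and next queue, waking any flush waiters if
 * the queue being dropped has no buffers left on its waitlist. */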
void drm_clear_next_buffer(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;

        dma->next_buffer = NULL;
        if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
                wake_up_interruptible(&dma->next_queue->flush_queue);
        }
        dma->next_queue = NULL;
}
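/* Choose the next queue to service. The kernel context ("while locked" DMA)
 * always wins; otherwise, while the current context still has waiting
 * buffers and its DRM_TIME_SLICE has not expired, it keeps running. After
 * that, a round-robin scan starting at last_checked picks a candidate and,
 * when a wrapper is supplied, the device timer is armed to force a switch
 * when the slice runs out. Returns the chosen queue, or -1 if there is
 * nothing to do. */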
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
        int i;
        int candidate = -1;
        int j         = jiffies;

        if (!dev) {
                DRM_ERROR("No device\n");
                return -1;
        }
        if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
                                /* This only happens between the time the
                                   interrupt is initialized and the time
                                   the queues are initialized. */
                return -1;
        }

                                /* Doing "while locked" DMA? */
        if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
                return DRM_KERNEL_CONTEXT;
        }

                                /* If there are buffers on the last_context
                                   queue, and we have not been executing
                                   this context very long, continue to
                                   execute this context. */
        if (dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j
            && DRM_WAITCOUNT(dev, dev->last_context)) {
                return dev->last_context;
        }

                                /* Otherwise, find a candidate */
        for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
                if (DRM_WAITCOUNT(dev, i)) {
                        candidate = dev->last_checked = i;
                        break;
                }
        }

        if (candidate < 0) {
                for (i = 0; i < dev->queue_count; i++) {
                        if (DRM_WAITCOUNT(dev, i)) {
                                candidate = dev->last_checked = i;
                                break;
                        }
                }
        }

        if (wrapper
            && candidate >= 0
            && candidate != dev->last_context
            && dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j) {
                if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
                        del_timer(&dev->timer);
                        dev->timer.function = wrapper;
                        dev->timer.data     = (unsigned long)dev;
                        dev->timer.expires  = dev->last_switch + DRM_TIME_SLICE;
                        add_timer(&dev->timer);
                }
        }

        return candidate;
}
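/* Queue the buffers described by a drm_dma_t on a context's waitlist.
 * "While locked" requests are routed to the kernel-context queue and require
 * the caller to hold the hardware lock; writers blocked by block_write sleep
 * interruptibly until the queue opens up. Each buffer is checked for
 * ownership, list state, and a non-zero size before being queued. */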
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
        int              i;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              idx;
        int              while_locked = 0;
        drm_device_dma_t *dma = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

        DRM_DEBUG("%d\n", d->send_count);

        if (d->flags & _DRM_DMA_WHILE_LOCKED) {
                int context = dev->lock.hw_lock->lock;

                if (!_DRM_LOCK_IS_HELD(context)) {
                        DRM_ERROR("No lock held during \"while locked\""
                                  " request\n");
                        return -EINVAL;
                }
                if (d->context != _DRM_LOCKING_CONTEXT(context)
                    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
                        DRM_ERROR("Lock held by %d while %d makes"
                                  " \"while locked\" request\n",
                                  _DRM_LOCKING_CONTEXT(context),
                                  d->context);
                        return -EINVAL;
                }
                q = dev->queuelist[DRM_KERNEL_CONTEXT];
                while_locked = 1;
        } else {
                q = dev->queuelist[d->context];
        }

        atomic_inc(&q->use_count);
        if (atomic_read(&q->block_write)) {
                current->state = TASK_INTERRUPTIBLE;
                add_wait_queue(&q->write_queue, &entry);
                atomic_inc(&q->block_count);
                for (;;) {
                        if (!atomic_read(&q->block_write)) break;
                        schedule();
                        if (signal_pending(current)) {
                                atomic_dec(&q->use_count);
                                return -EINTR;
                        }
                }
                atomic_dec(&q->block_count);
                current->state = TASK_RUNNING;
                remove_wait_queue(&q->write_queue, &entry);
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                if (buf->list != DRM_LIST_NONE) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer %d on list %d\n",
                                  current->pid, buf->idx, buf->list);
                }
                buf->used         = d->send_sizes[i];
                buf->while_locked = while_locked;
                buf->context      = d->context;
                if (!buf->used) {
                        DRM_ERROR("Queueing 0 length buffer\n");
                }
                if (buf->pending) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                if (buf->waiting) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                buf->waiting = 1;
                if (atomic_read(&q->use_count) == 1
                    || atomic_read(&q->finalization)) {
                        drm_free_buffer(dev, buf);
                } else {
                        drm_waitlist_put(&q->waitlist, buf);
                        atomic_inc(&q->total_queued);
                }
        }
        atomic_dec(&q->use_count);

        return 0;
}
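/* Hand out up to request_count - granted_count free buffers of a single
 * size order, copying each granted buffer's index and total size back to
 * the user's request arrays. */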
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
                                        int order)
{
        int              i;
        drm_buf_t        *buf;
        drm_device_dma_t *dma = dev->dma;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = drm_freelist_get(&dma->bufs[order].freelist,
                                       d->flags & _DRM_DMA_WAIT);
                if (!buf) break;
                if (buf->pending || buf->waiting) {
                        DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
                                  buf->idx,
                                  buf->pid,
                                  buf->waiting,
                                  buf->pending);
                }
                buf->pid = current->pid;
                copy_to_user_ret(&d->request_indices[i],
                                 &buf->idx,
                                 sizeof(buf->idx),
                                 -EFAULT);
                copy_to_user_ret(&d->request_sizes[i],
                                 &buf->total,
                                 sizeof(buf->total),
                                 -EFAULT);
                ++d->granted_count;
        }
        return 0;
}
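/* Satisfy a buffer request: try the order matching request_size first, then,
 * if the caller allows it, fall back to smaller (_DRM_DMA_SMALLER_OK) or
 * larger (_DRM_DMA_LARGER_OK) orders until the request is filled or no more
 * buffers are available. */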
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
        int order;
        int retcode = 0;
        int tmp_order;

        order = drm_order(dma->request_size);

        dma->granted_count = 0;
        retcode            = drm_dma_get_buffers_of_order(dev, dma, order);

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_SMALLER_OK)) {
                for (tmp_order = order - 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order >= DRM_MIN_ORDER;
                     --tmp_order) {
                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_LARGER_OK)) {
                for (tmp_order = order + 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order <= DRM_MAX_ORDER;
                     ++tmp_order) {
                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }
        return 0;
}