/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
27 #include "nouveau_drv.h"
28 #include "nouveau_drm.h"
29 #include "nouveau_dma.h"
30 #include "nouveau_ramht.h"
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	/* allocate buffer object */
	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
	if (ret)
		goto out;

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		goto out;
	/* create DMA object covering the entire memtype where the push
	 * buffer resides, userspace can submit its own push buffers from
	 * anywhere within the same memtype.
	 */
	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
					 &chan->pushbuf_vma);
		if (ret)
			goto out;

		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &chan->pushbuf);
		}
		chan->pushbuf_base = chan->pushbuf_vma.offset;
	} else
	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART,
					     &chan->pushbuf);
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM,
					     &chan->pushbuf);
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI,
					     &chan->pushbuf);
	}

out:
	if (ret) {
		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
		if (chan->pushbuf_bo) {
			nouveau_bo_unmap(chan->pushbuf_bo);
			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
		}
	}

	return ret;
}
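
/* A channel returned by nouveau_channel_alloc() comes back with users == 1,
 * one kref held and chan->mutex locked; callers release it again with
 * nouveau_channel_put().
 */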
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);
	FIRE_RING(chan);

	ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}

	*chan_ret = chan;
	return 0;
}
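
/* Take a new reference on a channel without locking it.  This only succeeds
 * while the channel still has users, so a channel in the middle of being
 * torn down cannot be resurrected.
 */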
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}
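
/* Drop a reference without holding chan->mutex.  The last "user" reference
 * idles the channel, kicks it off the hardware and releases its resources;
 * the structure itself stays around until the final kref goes.
 */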
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}
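
/* Locked variant: releases chan->mutex (taken by nouveau_channel_get())
 * before dropping the reference.
 */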
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}
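
/* kref release callback: runs once the final nouveau_channel_ref() drops. */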
static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
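
/* Wait for a channel to go idle: if work has been submitted since the last
 * acknowledged fence, emit a new fence and block on it.
 */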
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);

	if (chan->fence.sequence != chan->fence.sequence_ack) {
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}
}
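
/* Channels are also refcounted per-file: tearing down a file's channels
 * must drop both the userspace reference taken at allocation time (the
 * atomic_dec() below) and the lookup reference taken by
 * nouveau_channel_get().
 */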
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}
/***********************************
 * ioctls wrapping the functions
 ***********************************/
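
/* Rough userspace flow (illustrative, not part of this file): after opening
 * the DRM node, DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC fills in the channel id,
 * push buffer placement domains, subchannel info and a notifier GEM handle;
 * DRM_IOCTL_NOUVEAU_CHANNEL_FREE releases the channel again.
 */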
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (nouveau_vram_pushbuf == 0) {
		if (chan->dma.ib_max)
			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
						NOUVEAU_GEM_DOMAIN_GART;
		else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
	} else {
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	}

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle = NvSw;
		init->subchan[0].grclass = NV_SW;
		init->nr_subchan = 1;
	} else {
		init->nr_subchan = 0;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	list_del(&chan->list);
	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}
/***********************************
 * finally, the ioctl table
 ***********************************/
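
/* DRM_AUTH restricts each ioctl to authenticated clients, and DRM_UNLOCKED
 * lets it run without taking the global DRM lock; SETPARAM additionally
 * requires the DRM master and root.
 */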
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);