/*
 * drivers/gpu/drm/nouveau/nouveau_state.c
 */
1 /*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 #include <linux/swab.h>
27 #include "drmP.h"
28 #include "drm.h"
29 #include "drm_sarea.h"
30 #include "drm_crtc_helper.h"
31 #include <linux/vgaarb.h>
32
33 #include "nouveau_drv.h"
34 #include "nouveau_drm.h"
35 #include "nv50_display.h"
36
/* No-op engine hooks, installed where a chipset needs no work for a
 * particular sub-engine (e.g. PFB on NV50, PFIFO takedown pre-NV50). */
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}
39
40 static int nouveau_init_engine_ptrs(struct drm_device *dev)
41 {
42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nouveau_engine *engine = &dev_priv->engine;
44
45 switch (dev_priv->chipset & 0xf0) {
46 case 0x00:
47 engine->instmem.init = nv04_instmem_init;
48 engine->instmem.takedown = nv04_instmem_takedown;
49 engine->instmem.suspend = nv04_instmem_suspend;
50 engine->instmem.resume = nv04_instmem_resume;
51 engine->instmem.populate = nv04_instmem_populate;
52 engine->instmem.clear = nv04_instmem_clear;
53 engine->instmem.bind = nv04_instmem_bind;
54 engine->instmem.unbind = nv04_instmem_unbind;
55 engine->instmem.prepare_access = nv04_instmem_prepare_access;
56 engine->instmem.finish_access = nv04_instmem_finish_access;
57 engine->mc.init = nv04_mc_init;
58 engine->mc.takedown = nv04_mc_takedown;
59 engine->timer.init = nv04_timer_init;
60 engine->timer.read = nv04_timer_read;
61 engine->timer.takedown = nv04_timer_takedown;
62 engine->fb.init = nv04_fb_init;
63 engine->fb.takedown = nv04_fb_takedown;
64 engine->graph.grclass = nv04_graph_grclass;
65 engine->graph.init = nv04_graph_init;
66 engine->graph.takedown = nv04_graph_takedown;
67 engine->graph.fifo_access = nv04_graph_fifo_access;
68 engine->graph.channel = nv04_graph_channel;
69 engine->graph.create_context = nv04_graph_create_context;
70 engine->graph.destroy_context = nv04_graph_destroy_context;
71 engine->graph.load_context = nv04_graph_load_context;
72 engine->graph.unload_context = nv04_graph_unload_context;
73 engine->fifo.channels = 16;
74 engine->fifo.init = nv04_fifo_init;
75 engine->fifo.takedown = nouveau_stub_takedown;
76 engine->fifo.disable = nv04_fifo_disable;
77 engine->fifo.enable = nv04_fifo_enable;
78 engine->fifo.reassign = nv04_fifo_reassign;
79 engine->fifo.cache_flush = nv04_fifo_cache_flush;
80 engine->fifo.cache_pull = nv04_fifo_cache_pull;
81 engine->fifo.channel_id = nv04_fifo_channel_id;
82 engine->fifo.create_context = nv04_fifo_create_context;
83 engine->fifo.destroy_context = nv04_fifo_destroy_context;
84 engine->fifo.load_context = nv04_fifo_load_context;
85 engine->fifo.unload_context = nv04_fifo_unload_context;
86 break;
87 case 0x10:
88 engine->instmem.init = nv04_instmem_init;
89 engine->instmem.takedown = nv04_instmem_takedown;
90 engine->instmem.suspend = nv04_instmem_suspend;
91 engine->instmem.resume = nv04_instmem_resume;
92 engine->instmem.populate = nv04_instmem_populate;
93 engine->instmem.clear = nv04_instmem_clear;
94 engine->instmem.bind = nv04_instmem_bind;
95 engine->instmem.unbind = nv04_instmem_unbind;
96 engine->instmem.prepare_access = nv04_instmem_prepare_access;
97 engine->instmem.finish_access = nv04_instmem_finish_access;
98 engine->mc.init = nv04_mc_init;
99 engine->mc.takedown = nv04_mc_takedown;
100 engine->timer.init = nv04_timer_init;
101 engine->timer.read = nv04_timer_read;
102 engine->timer.takedown = nv04_timer_takedown;
103 engine->fb.init = nv10_fb_init;
104 engine->fb.takedown = nv10_fb_takedown;
105 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
106 engine->graph.grclass = nv10_graph_grclass;
107 engine->graph.init = nv10_graph_init;
108 engine->graph.takedown = nv10_graph_takedown;
109 engine->graph.channel = nv10_graph_channel;
110 engine->graph.create_context = nv10_graph_create_context;
111 engine->graph.destroy_context = nv10_graph_destroy_context;
112 engine->graph.fifo_access = nv04_graph_fifo_access;
113 engine->graph.load_context = nv10_graph_load_context;
114 engine->graph.unload_context = nv10_graph_unload_context;
115 engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
116 engine->fifo.channels = 32;
117 engine->fifo.init = nv10_fifo_init;
118 engine->fifo.takedown = nouveau_stub_takedown;
119 engine->fifo.disable = nv04_fifo_disable;
120 engine->fifo.enable = nv04_fifo_enable;
121 engine->fifo.reassign = nv04_fifo_reassign;
122 engine->fifo.cache_flush = nv04_fifo_cache_flush;
123 engine->fifo.cache_pull = nv04_fifo_cache_pull;
124 engine->fifo.channel_id = nv10_fifo_channel_id;
125 engine->fifo.create_context = nv10_fifo_create_context;
126 engine->fifo.destroy_context = nv10_fifo_destroy_context;
127 engine->fifo.load_context = nv10_fifo_load_context;
128 engine->fifo.unload_context = nv10_fifo_unload_context;
129 break;
130 case 0x20:
131 engine->instmem.init = nv04_instmem_init;
132 engine->instmem.takedown = nv04_instmem_takedown;
133 engine->instmem.suspend = nv04_instmem_suspend;
134 engine->instmem.resume = nv04_instmem_resume;
135 engine->instmem.populate = nv04_instmem_populate;
136 engine->instmem.clear = nv04_instmem_clear;
137 engine->instmem.bind = nv04_instmem_bind;
138 engine->instmem.unbind = nv04_instmem_unbind;
139 engine->instmem.prepare_access = nv04_instmem_prepare_access;
140 engine->instmem.finish_access = nv04_instmem_finish_access;
141 engine->mc.init = nv04_mc_init;
142 engine->mc.takedown = nv04_mc_takedown;
143 engine->timer.init = nv04_timer_init;
144 engine->timer.read = nv04_timer_read;
145 engine->timer.takedown = nv04_timer_takedown;
146 engine->fb.init = nv10_fb_init;
147 engine->fb.takedown = nv10_fb_takedown;
148 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
149 engine->graph.grclass = nv20_graph_grclass;
150 engine->graph.init = nv20_graph_init;
151 engine->graph.takedown = nv20_graph_takedown;
152 engine->graph.channel = nv10_graph_channel;
153 engine->graph.create_context = nv20_graph_create_context;
154 engine->graph.destroy_context = nv20_graph_destroy_context;
155 engine->graph.fifo_access = nv04_graph_fifo_access;
156 engine->graph.load_context = nv20_graph_load_context;
157 engine->graph.unload_context = nv20_graph_unload_context;
158 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
159 engine->fifo.channels = 32;
160 engine->fifo.init = nv10_fifo_init;
161 engine->fifo.takedown = nouveau_stub_takedown;
162 engine->fifo.disable = nv04_fifo_disable;
163 engine->fifo.enable = nv04_fifo_enable;
164 engine->fifo.reassign = nv04_fifo_reassign;
165 engine->fifo.cache_flush = nv04_fifo_cache_flush;
166 engine->fifo.cache_pull = nv04_fifo_cache_pull;
167 engine->fifo.channel_id = nv10_fifo_channel_id;
168 engine->fifo.create_context = nv10_fifo_create_context;
169 engine->fifo.destroy_context = nv10_fifo_destroy_context;
170 engine->fifo.load_context = nv10_fifo_load_context;
171 engine->fifo.unload_context = nv10_fifo_unload_context;
172 break;
173 case 0x30:
174 engine->instmem.init = nv04_instmem_init;
175 engine->instmem.takedown = nv04_instmem_takedown;
176 engine->instmem.suspend = nv04_instmem_suspend;
177 engine->instmem.resume = nv04_instmem_resume;
178 engine->instmem.populate = nv04_instmem_populate;
179 engine->instmem.clear = nv04_instmem_clear;
180 engine->instmem.bind = nv04_instmem_bind;
181 engine->instmem.unbind = nv04_instmem_unbind;
182 engine->instmem.prepare_access = nv04_instmem_prepare_access;
183 engine->instmem.finish_access = nv04_instmem_finish_access;
184 engine->mc.init = nv04_mc_init;
185 engine->mc.takedown = nv04_mc_takedown;
186 engine->timer.init = nv04_timer_init;
187 engine->timer.read = nv04_timer_read;
188 engine->timer.takedown = nv04_timer_takedown;
189 engine->fb.init = nv10_fb_init;
190 engine->fb.takedown = nv10_fb_takedown;
191 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
192 engine->graph.grclass = nv30_graph_grclass;
193 engine->graph.init = nv30_graph_init;
194 engine->graph.takedown = nv20_graph_takedown;
195 engine->graph.fifo_access = nv04_graph_fifo_access;
196 engine->graph.channel = nv10_graph_channel;
197 engine->graph.create_context = nv20_graph_create_context;
198 engine->graph.destroy_context = nv20_graph_destroy_context;
199 engine->graph.load_context = nv20_graph_load_context;
200 engine->graph.unload_context = nv20_graph_unload_context;
201 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
202 engine->fifo.channels = 32;
203 engine->fifo.init = nv10_fifo_init;
204 engine->fifo.takedown = nouveau_stub_takedown;
205 engine->fifo.disable = nv04_fifo_disable;
206 engine->fifo.enable = nv04_fifo_enable;
207 engine->fifo.reassign = nv04_fifo_reassign;
208 engine->fifo.cache_flush = nv04_fifo_cache_flush;
209 engine->fifo.cache_pull = nv04_fifo_cache_pull;
210 engine->fifo.channel_id = nv10_fifo_channel_id;
211 engine->fifo.create_context = nv10_fifo_create_context;
212 engine->fifo.destroy_context = nv10_fifo_destroy_context;
213 engine->fifo.load_context = nv10_fifo_load_context;
214 engine->fifo.unload_context = nv10_fifo_unload_context;
215 break;
216 case 0x40:
217 case 0x60:
218 engine->instmem.init = nv04_instmem_init;
219 engine->instmem.takedown = nv04_instmem_takedown;
220 engine->instmem.suspend = nv04_instmem_suspend;
221 engine->instmem.resume = nv04_instmem_resume;
222 engine->instmem.populate = nv04_instmem_populate;
223 engine->instmem.clear = nv04_instmem_clear;
224 engine->instmem.bind = nv04_instmem_bind;
225 engine->instmem.unbind = nv04_instmem_unbind;
226 engine->instmem.prepare_access = nv04_instmem_prepare_access;
227 engine->instmem.finish_access = nv04_instmem_finish_access;
228 engine->mc.init = nv40_mc_init;
229 engine->mc.takedown = nv40_mc_takedown;
230 engine->timer.init = nv04_timer_init;
231 engine->timer.read = nv04_timer_read;
232 engine->timer.takedown = nv04_timer_takedown;
233 engine->fb.init = nv40_fb_init;
234 engine->fb.takedown = nv40_fb_takedown;
235 engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
236 engine->graph.grclass = nv40_graph_grclass;
237 engine->graph.init = nv40_graph_init;
238 engine->graph.takedown = nv40_graph_takedown;
239 engine->graph.fifo_access = nv04_graph_fifo_access;
240 engine->graph.channel = nv40_graph_channel;
241 engine->graph.create_context = nv40_graph_create_context;
242 engine->graph.destroy_context = nv40_graph_destroy_context;
243 engine->graph.load_context = nv40_graph_load_context;
244 engine->graph.unload_context = nv40_graph_unload_context;
245 engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
246 engine->fifo.channels = 32;
247 engine->fifo.init = nv40_fifo_init;
248 engine->fifo.takedown = nouveau_stub_takedown;
249 engine->fifo.disable = nv04_fifo_disable;
250 engine->fifo.enable = nv04_fifo_enable;
251 engine->fifo.reassign = nv04_fifo_reassign;
252 engine->fifo.cache_flush = nv04_fifo_cache_flush;
253 engine->fifo.cache_pull = nv04_fifo_cache_pull;
254 engine->fifo.channel_id = nv10_fifo_channel_id;
255 engine->fifo.create_context = nv40_fifo_create_context;
256 engine->fifo.destroy_context = nv40_fifo_destroy_context;
257 engine->fifo.load_context = nv40_fifo_load_context;
258 engine->fifo.unload_context = nv40_fifo_unload_context;
259 break;
260 case 0x50:
261 case 0x80: /* gotta love NVIDIA's consistency.. */
262 case 0x90:
263 case 0xA0:
264 engine->instmem.init = nv50_instmem_init;
265 engine->instmem.takedown = nv50_instmem_takedown;
266 engine->instmem.suspend = nv50_instmem_suspend;
267 engine->instmem.resume = nv50_instmem_resume;
268 engine->instmem.populate = nv50_instmem_populate;
269 engine->instmem.clear = nv50_instmem_clear;
270 engine->instmem.bind = nv50_instmem_bind;
271 engine->instmem.unbind = nv50_instmem_unbind;
272 engine->instmem.prepare_access = nv50_instmem_prepare_access;
273 engine->instmem.finish_access = nv50_instmem_finish_access;
274 engine->mc.init = nv50_mc_init;
275 engine->mc.takedown = nv50_mc_takedown;
276 engine->timer.init = nv04_timer_init;
277 engine->timer.read = nv04_timer_read;
278 engine->timer.takedown = nv04_timer_takedown;
279 engine->fb.init = nouveau_stub_init;
280 engine->fb.takedown = nouveau_stub_takedown;
281 engine->graph.grclass = nv50_graph_grclass;
282 engine->graph.init = nv50_graph_init;
283 engine->graph.takedown = nv50_graph_takedown;
284 engine->graph.fifo_access = nv50_graph_fifo_access;
285 engine->graph.channel = nv50_graph_channel;
286 engine->graph.create_context = nv50_graph_create_context;
287 engine->graph.destroy_context = nv50_graph_destroy_context;
288 engine->graph.load_context = nv50_graph_load_context;
289 engine->graph.unload_context = nv50_graph_unload_context;
290 engine->fifo.channels = 128;
291 engine->fifo.init = nv50_fifo_init;
292 engine->fifo.takedown = nv50_fifo_takedown;
293 engine->fifo.disable = nv04_fifo_disable;
294 engine->fifo.enable = nv04_fifo_enable;
295 engine->fifo.reassign = nv04_fifo_reassign;
296 engine->fifo.channel_id = nv50_fifo_channel_id;
297 engine->fifo.create_context = nv50_fifo_create_context;
298 engine->fifo.destroy_context = nv50_fifo_destroy_context;
299 engine->fifo.load_context = nv50_fifo_load_context;
300 engine->fifo.unload_context = nv50_fifo_unload_context;
301 break;
302 default:
303 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
304 return 1;
305 }
306
307 return 0;
308 }
309
310 static unsigned int
311 nouveau_vga_set_decode(void *priv, bool state)
312 {
313 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
316 else
317 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
318 }
319
/* Create the driver's own "kernel" channel and give it DMA objects for
 * VRAM (NvDmaVRAM) and the GART aperture (NvDmaGART).  On any failure
 * everything allocated here is torn down and dev_priv->channel is left
 * NULL. */
static int
nouveau_card_init_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	/* (struct drm_file *)-2 — sentinel owner marking this as the
	 * kernel's channel rather than a client's; NOTE(review): exact
	 * semantics live in nouveau_channel_alloc(), not visible here. */
	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	/* DMA object spanning all of VRAM, bound into the channel as
	 * handle NvDmaVRAM. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, nouveau_mem_fb_amount(dev),
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	/* DMA object spanning the GART aperture, bound as NvDmaGART. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	return 0;
out_err:
	/* gpuobj is NULL here unless the last *_dma_new succeeded but the
	 * following ref_add failed. */
	nouveau_gpuobj_del(dev, &gpuobj);
	nouveau_channel_free(dev_priv->channel);
	dev_priv->channel = NULL;
	return ret;
}
365
/* Bring the whole card up: engine hook selection, BIOS parsing (KMS only),
 * every hardware sub-engine in dependency order, IRQs, the kernel channel
 * and (KMS only) the display.  Idempotent once INIT_DONE.  On failure the
 * goto ladder unwinds exactly what succeeded, in reverse order. */
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		goto out;
	engine = &dev_priv->engine;
	/* Mark FAILED until the very end so a partial init is detectable. */
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			goto out;
	}

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		goto out_bios;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		goto out_gpuobj_early;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		goto out_instmem;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		goto out_mem;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		goto out_gpuobj;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		goto out_mc;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		goto out_timer;

	/* PGRAPH */
	ret = engine->graph.init(dev);
	if (ret)
		goto out_fb;

	/* PFIFO */
	ret = engine->fifo.init(dev);
	if (ret)
		goto out_graph;

	/* this call irq_preinstall, register irq handler and
	 * call irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		goto out_fifo;

	ret = drm_vblank_init(dev, 0);
	if (ret)
		goto out_irq;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	/* Kernel channel only when PGRAPH acceleration is usable. */
	if (!engine->graph.accel_blocked) {
		ret = nouveau_card_init_channel(dev);
		if (ret)
			goto out_irq;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50)
			ret = nv50_display_create(dev);
		else
			ret = nv04_display_create(dev);
		if (ret)
			goto out_irq;
	}

	/* Backlight failure is non-fatal: log and continue. */
	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_helper_initial_config(dev);

	return 0;

	/* Error unwind — strictly the reverse of the init order above. */
out_irq:
	drm_irq_uninstall(dev);
out_fifo:
	engine->fifo.takedown(dev);
out_graph:
	engine->graph.takedown(dev);
out_fb:
	engine->fb.takedown(dev);
out_timer:
	engine->timer.takedown(dev);
out_mc:
	engine->mc.takedown(dev);
out_gpuobj:
	nouveau_gpuobj_takedown(dev);
out_mem:
	nouveau_mem_close(dev);
out_instmem:
	engine->instmem.takedown(dev);
out_gpuobj_early:
	nouveau_gpuobj_late_takedown(dev);
out_bios:
	nouveau_bios_takedown(dev);
out:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
	return ret;
}
505
/* Tear the card back down: free the kernel channel, take down each
 * engine in reverse init order, release TTM memory pools, IRQs (KMS)
 * and the VGA arbiter registration.  No-op if already INIT_DOWN. */
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
		nouveau_backlight_exit(dev);

		/* Kernel channel first — it holds gpuobjs the engines own. */
		if (dev_priv->channel) {
			nouveau_channel_free(dev_priv->channel);
			dev_priv->channel = NULL;
		}

		engine->fifo.takedown(dev);
		engine->graph.takedown(dev);
		engine->fb.takedown(dev);
		engine->timer.takedown(dev);
		engine->mc.takedown(dev);

		/* Evict/clean the TTM VRAM and TT pools under struct_mutex
		 * before tearing down the SG-DMA backing. */
		mutex_lock(&dev->struct_mutex);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
		mutex_unlock(&dev->struct_mutex);
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_mem_close(dev);
		engine->instmem.takedown(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			drm_irq_uninstall(dev);

		nouveau_gpuobj_late_takedown(dev);
		nouveau_bios_takedown(dev);

		vga_client_register(dev->pdev, NULL, NULL, NULL);

		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
	}
}
548
549 /* here a client dies, release the stuff that was allocated for its
550 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	/* Free any channels this client still owns. */
	nouveau_channel_cleanup(dev, file_priv);
}
555
556 /* first module load, setup the mmio/fb mapping */
557 /* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	/* Nothing to do: the mmio/fb mappings are made in nouveau_load(). */
	return 0;
}
562
563 /* if we have an OF card, copy vbios to RAMIN */
/* On Open Firmware (powerpc) machines the VBIOS is exposed as the
 * "NVDA,BMP" property of the card's OF node rather than a PCI ROM;
 * copy it word-by-word into RAMIN so the BIOS parser can find it.
 * Compiled out entirely on non-powerpc. */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	int size, i;
	const uint32_t *bios;
	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
	if (!dn) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	bios = of_get_property(dn, "NVDA,BMP", &size);
	if (bios) {
		/* 'size' is in bytes; nv_wi32 writes one 32-bit word. */
		for (i = 0; i < size; i += 4)
			nv_wi32(dev, i, bios[i/4]);
		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
	} else {
		NV_INFO(dev, "Unable to get the OF bios\n");
	}
#endif
}
585
586 int nouveau_load(struct drm_device *dev, unsigned long flags)
587 {
588 struct drm_nouveau_private *dev_priv;
589 uint32_t reg0;
590 resource_size_t mmio_start_offs;
591
592 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
593 if (!dev_priv)
594 return -ENOMEM;
595 dev->dev_private = dev_priv;
596 dev_priv->dev = dev;
597
598 dev_priv->flags = flags & NOUVEAU_FLAGS;
599 dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
600
601 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
602 dev->pci_vendor, dev->pci_device, dev->pdev->class);
603
604 dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
605
606 if (dev_priv->acpi_dsm)
607 nouveau_hybrid_setup(dev);
608
609 dev_priv->wq = create_workqueue("nouveau");
610 if (!dev_priv->wq)
611 return -EINVAL;
612
613 /* resource 0 is mmio regs */
614 /* resource 1 is linear FB */
615 /* resource 2 is RAMIN (mmio regs + 0x1000000) */
616 /* resource 6 is bios */
617
618 /* map the mmio regs */
619 mmio_start_offs = pci_resource_start(dev->pdev, 0);
620 dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
621 if (!dev_priv->mmio) {
622 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
623 "Please report your setup to " DRIVER_EMAIL "\n");
624 return -EINVAL;
625 }
626 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
627 (unsigned long long)mmio_start_offs);
628
629 #ifdef __BIG_ENDIAN
630 /* Put the card in BE mode if it's not */
631 if (nv_rd32(dev, NV03_PMC_BOOT_1))
632 nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
633
634 DRM_MEMORYBARRIER();
635 #endif
636
637 /* Time to determine the card architecture */
638 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
639
640 /* We're dealing with >=NV10 */
641 if ((reg0 & 0x0f000000) > 0) {
642 /* Bit 27-20 contain the architecture in hex */
643 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
644 /* NV04 or NV05 */
645 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
646 if (reg0 & 0x00f00000)
647 dev_priv->chipset = 0x05;
648 else
649 dev_priv->chipset = 0x04;
650 } else
651 dev_priv->chipset = 0xff;
652
653 switch (dev_priv->chipset & 0xf0) {
654 case 0x00:
655 case 0x10:
656 case 0x20:
657 case 0x30:
658 dev_priv->card_type = dev_priv->chipset & 0xf0;
659 break;
660 case 0x40:
661 case 0x60:
662 dev_priv->card_type = NV_40;
663 break;
664 case 0x50:
665 case 0x80:
666 case 0x90:
667 case 0xa0:
668 dev_priv->card_type = NV_50;
669 break;
670 default:
671 NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
672 return -EINVAL;
673 }
674
675 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
676 dev_priv->card_type, reg0);
677
678 /* map larger RAMIN aperture on NV40 cards */
679 dev_priv->ramin = NULL;
680 if (dev_priv->card_type >= NV_40) {
681 int ramin_bar = 2;
682 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
683 ramin_bar = 3;
684
685 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
686 dev_priv->ramin = ioremap(
687 pci_resource_start(dev->pdev, ramin_bar),
688 dev_priv->ramin_size);
689 if (!dev_priv->ramin) {
690 NV_ERROR(dev, "Failed to init RAMIN mapping, "
691 "limited instance memory available\n");
692 }
693 }
694
695 /* On older cards (or if the above failed), create a map covering
696 * the BAR0 PRAMIN aperture */
697 if (!dev_priv->ramin) {
698 dev_priv->ramin_size = 1 * 1024 * 1024;
699 dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
700 dev_priv->ramin_size);
701 if (!dev_priv->ramin) {
702 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
703 return -ENOMEM;
704 }
705 }
706
707 nouveau_OF_copy_vbios_to_ramin(dev);
708
709 /* Special flags */
710 if (dev->pci_device == 0x01a0)
711 dev_priv->flags |= NV_NFORCE;
712 else if (dev->pci_device == 0x01f0)
713 dev_priv->flags |= NV_NFORCE2;
714
715 /* For kernel modesetting, init card now and bring up fbcon */
716 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
717 int ret = nouveau_card_init(dev);
718 if (ret)
719 return ret;
720 }
721
722 return 0;
723 }
724
725 static void nouveau_close(struct drm_device *dev)
726 {
727 struct drm_nouveau_private *dev_priv = dev->dev_private;
728
729 /* In the case of an error dev_priv may not be allocated yet */
730 if (dev_priv)
731 nouveau_card_takedown(dev);
732 }
733
734 /* KMS: we need mmio at load time, not when the first drm client opens. */
735 void nouveau_lastclose(struct drm_device *dev)
736 {
737 if (drm_core_check_feature(dev, DRIVER_MODESET))
738 return;
739
740 nouveau_close(dev);
741 }
742
/* DRM driver ->unload hook: destroy the display and take the card down
 * (KMS does this here since lastclose is a no-op for it), then drop the
 * register/RAMIN mappings and free dev_priv. */
int nouveau_unload(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50)
			nv50_display_destroy(dev);
		else
			nv04_display_destroy(dev);
		nouveau_close(dev);
	}

	iounmap(dev_priv->mmio);
	iounmap(dev_priv->ramin);

	kfree(dev_priv);
	dev->dev_private = NULL;
	return 0;
}
762
/* UMS ioctl: let a client trigger full card initialisation
 * (nouveau_card_init is idempotent once done). */
int
nouveau_ioctl_card_init(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return nouveau_card_init(dev);
}
769
/* GETPARAM ioctl: report one driver/hardware property selected by
 * getparam->param into getparam->value.  Returns -EINVAL for unknown
 * parameters or when PCIGART info is requested but absent. */
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_getparam *getparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = dev_priv->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		getparam->value = dev->pci_vendor;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		getparam->value = dev->pci_device;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		/* Report the bus the card actually sits on. */
		if (drm_device_is_agp(dev))
			getparam->value = NV_AGP;
		else if (drm_device_is_pcie(dev))
			getparam->value = NV_PCIE;
		else
			getparam->value = NV_PCI;
		break;
	case NOUVEAU_GETPARAM_FB_PHYSICAL:
		getparam->value = dev_priv->fb_phys;
		break;
	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
		getparam->value = dev_priv->gart_info.aper_base;
		break;
	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
		/* Only valid when a PCIGART scatter/gather area exists. */
		if (dev->sg) {
			getparam->value = (unsigned long)dev->sg->virtual;
		} else {
			NV_ERROR(dev, "Requested PCIGART address, "
				      "while no PCIGART was created\n");
			return -EINVAL;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = dev_priv->fb_available_size;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = dev_priv->gart_info.aper_size;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = dev_priv->vm_vram_base;
		break;
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}
827
/* SETPARAM ioctl: no writable parameters are currently defined, so
 * every request is rejected; the switch is scaffolding for future
 * parameters. */
int
nouveau_ioctl_setparam(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_setparam *setparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (setparam->param) {
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
		return -EINVAL;
	}

	return 0;
}
844
845 /* Wait until (value(reg) & mask) == val, up until timeout has hit */
846 bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
847 uint32_t reg, uint32_t mask, uint32_t val)
848 {
849 struct drm_nouveau_private *dev_priv = dev->dev_private;
850 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
851 uint64_t start = ptimer->read(dev);
852
853 do {
854 if ((nv_rd32(dev, reg) & mask) == val)
855 return true;
856 } while (ptimer->read(dev) - start < timeout);
857
858 return false;
859 }
860
/* Waits for PGRAPH to go completely idle (NV04_PGRAPH_STATUS == 0);
 * logs the stuck status and returns false on timeout.  The timeout
 * value is supplied by the nv_wait macro, not visible here. */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
	if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
			 nv_rd32(dev, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}
872