drivers/gpu/drm/qxl/qxl_kms.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <drm/drm_crtc_helper.h>
#include <linux/io-mapping.h>

int qxl_log_level;

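/* Dump one qxl_mode entry from the ROM mode list via DRM_DEBUG_KMS. */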
static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}

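/*
 * Validate the mapped ROM: check the magic value, log the device version
 * and layout information, and locate the mode list that follows the ROM
 * header.
 */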
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}

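/*
 * Program one memory slot into the hardware: write its physical range into
 * the ram header and announce it to the device with a memslot-add I/O
 * command.
 */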
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}

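/*
 * Register a memory slot covering [start_phys_addr, end_phys_addr) and
 * precompute its high_bits value: the slot index and generation packed into
 * the top (slot_id_bits + slot_gen_bits) bits of a device address.
 */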
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

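/* Re-program the main and surface memory slots, e.g. after a device reset. */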
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}

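/* Deferred work handler: run qxl_garbage_collect() from process context. */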
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}

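/*
 * One-time device initialization: map the PCI BARs and ROM, validate the
 * ROM contents, create the command/cursor/release rings inside the ram
 * header, reset the device and register the main and surface memory slots.
 */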
int qxl_device_init(struct qxl_device *qdev,
		    struct drm_driver *drv,
		    struct pci_dev *pdev)
{
	int r, sb;

	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
	if (r)
		return r;

	qdev->ddev.pdev = pdev;
	pci_set_drvdata(pdev, &qdev->ddev);
	qdev->ddev.dev_private = qdev;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

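	/*
	 * PCI BAR layout: BAR 0 is VRAM, BAR 2 is the ROM, BAR 3 holds the
	 * I/O ports, and BAR 4 (when present) is the 64-bit surface BAR;
	 * without BAR 4 we fall back to the 32-bit surface BAR 1.
	 */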
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	qxl_check_device(qdev);

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

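	/*
	 * The ring headers for the command, cursor and release rings all
	 * live inside the ram header mapped just above.
	 */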
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
		&(qdev->ram_header->cursor_ring_hdr),
		sizeof(struct qxl_command),
		QXL_CURSOR_RING_SIZE,
		qdev->io_base + QXL_IO_NOTIFY_CMD,
		false,
		&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
		&(qdev->ram_header->release_ring_hdr),
		sizeof(uint64_t),
		QXL_RELEASE_RING_SIZE, 0, true,
		NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x]\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
		 qdev->surfaces_mem_slot,
		 (unsigned long)qdev->surfaceram_base,
		 (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;
}

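/*
 * Tear down everything qxl_device_init() set up: drop the pending release
 * BOs, flush the GC work, free the rings, and unmap the ram header and ROM.
 */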
void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_work(&qdev->gc_work);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}