/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_ASIC_H__
#define __RADEON_ASIC_H__

/*
 * common functions
 *
 * Clock-control entry points shared by several ASIC tables below.
 * NOTE(review): the "legacy" vs "atom" split presumably corresponds to
 * pre-AtomBIOS vs AtomBIOS boards — confirm against the implementations.
 */
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);

uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);

/*
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Forward declarations for the r100 backend; these populate the
 * r100_asic table below and are also reused by later families.
 */
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
extern int r100_suspend(struct radeon_device *rdev);
extern int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r100_gpu_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size);
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);

79 | static struct radeon_asic r100_asic = { | |
068a117c | 80 | .init = &r100_init, |
d4550907 JG |
81 | .fini = &r100_fini, |
82 | .suspend = &r100_suspend, | |
83 | .resume = &r100_resume, | |
771fe6b9 | 84 | .gpu_reset = &r100_gpu_reset, |
771fe6b9 JG |
85 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
86 | .gart_set_page = &r100_pci_gart_set_page, | |
3ce0a23d | 87 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 88 | .ring_start = &r100_ring_start, |
3ce0a23d JG |
89 | .ring_test = &r100_ring_test, |
90 | .ring_ib_execute = &r100_ring_ib_execute, | |
771fe6b9 JG |
91 | .irq_set = &r100_irq_set, |
92 | .irq_process = &r100_irq_process, | |
7ed220d7 | 93 | .get_vblank_counter = &r100_get_vblank_counter, |
771fe6b9 JG |
94 | .fence_ring_emit = &r100_fence_ring_emit, |
95 | .cs_parse = &r100_cs_parse, | |
96 | .copy_blit = &r100_copy_blit, | |
97 | .copy_dma = NULL, | |
98 | .copy = &r100_copy_blit, | |
7433874e | 99 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
771fe6b9 | 100 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
7433874e | 101 | .get_memory_clock = NULL, |
771fe6b9 JG |
102 | .set_memory_clock = NULL, |
103 | .set_pcie_lanes = NULL, | |
104 | .set_clock_gating = &radeon_legacy_set_clock_gating, | |
e024e110 DA |
105 | .set_surface_reg = r100_set_surface_reg, |
106 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 107 | .bandwidth_update = &r100_bandwidth_update, |
771fe6b9 JG |
108 | }; |


/*
 * r300,r350,rv350,rv380
 *
 * r300 backend plus the rv370 PCIE GART/lane helpers used by the
 * tables below.
 */
extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_gpu_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
				 struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r300_copy_dma(struct radeon_device *rdev,
			 uint64_t src_offset,
			 uint64_t dst_offset,
			 unsigned num_pages,
			 struct radeon_fence *fence);
771fe6b9 | 133 | static struct radeon_asic r300_asic = { |
068a117c | 134 | .init = &r300_init, |
207bf9e9 JG |
135 | .fini = &r300_fini, |
136 | .suspend = &r300_suspend, | |
137 | .resume = &r300_resume, | |
771fe6b9 | 138 | .gpu_reset = &r300_gpu_reset, |
771fe6b9 JG |
139 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
140 | .gart_set_page = &r100_pci_gart_set_page, | |
3ce0a23d | 141 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 142 | .ring_start = &r300_ring_start, |
3ce0a23d JG |
143 | .ring_test = &r100_ring_test, |
144 | .ring_ib_execute = &r100_ring_ib_execute, | |
771fe6b9 JG |
145 | .irq_set = &r100_irq_set, |
146 | .irq_process = &r100_irq_process, | |
7ed220d7 | 147 | .get_vblank_counter = &r100_get_vblank_counter, |
771fe6b9 JG |
148 | .fence_ring_emit = &r300_fence_ring_emit, |
149 | .cs_parse = &r300_cs_parse, | |
150 | .copy_blit = &r100_copy_blit, | |
151 | .copy_dma = &r300_copy_dma, | |
152 | .copy = &r100_copy_blit, | |
7433874e | 153 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
771fe6b9 | 154 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
7433874e | 155 | .get_memory_clock = NULL, |
771fe6b9 JG |
156 | .set_memory_clock = NULL, |
157 | .set_pcie_lanes = &rv370_set_pcie_lanes, | |
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | |
e024e110 DA |
159 | .set_surface_reg = r100_set_surface_reg, |
160 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 161 | .bandwidth_update = &r100_bandwidth_update, |
771fe6b9 JG |
162 | }; |

/*
 * r420,r423,rv410
 *
 * r420 only overrides the lifecycle entry points; everything else in
 * its table is borrowed from r100/r300.
 */
extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
771fe6b9 | 171 | static struct radeon_asic r420_asic = { |
9f022ddf JG |
172 | .init = &r420_init, |
173 | .fini = &r420_fini, | |
174 | .suspend = &r420_suspend, | |
175 | .resume = &r420_resume, | |
771fe6b9 | 176 | .gpu_reset = &r300_gpu_reset, |
771fe6b9 JG |
177 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
178 | .gart_set_page = &rv370_pcie_gart_set_page, | |
3ce0a23d | 179 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 180 | .ring_start = &r300_ring_start, |
3ce0a23d JG |
181 | .ring_test = &r100_ring_test, |
182 | .ring_ib_execute = &r100_ring_ib_execute, | |
771fe6b9 JG |
183 | .irq_set = &r100_irq_set, |
184 | .irq_process = &r100_irq_process, | |
7ed220d7 | 185 | .get_vblank_counter = &r100_get_vblank_counter, |
771fe6b9 JG |
186 | .fence_ring_emit = &r300_fence_ring_emit, |
187 | .cs_parse = &r300_cs_parse, | |
188 | .copy_blit = &r100_copy_blit, | |
189 | .copy_dma = &r300_copy_dma, | |
190 | .copy = &r100_copy_blit, | |
7433874e | 191 | .get_engine_clock = &radeon_atom_get_engine_clock, |
771fe6b9 | 192 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 193 | .get_memory_clock = &radeon_atom_get_memory_clock, |
771fe6b9 JG |
194 | .set_memory_clock = &radeon_atom_set_memory_clock, |
195 | .set_pcie_lanes = &rv370_set_pcie_lanes, | |
196 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
e024e110 DA |
197 | .set_surface_reg = r100_set_surface_reg, |
198 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 199 | .bandwidth_update = &r100_bandwidth_update, |
771fe6b9 JG |
200 | }; |


/*
 * rs400,rs480
 *
 * rs400 backend: own lifecycle, GART and MC register accessors.
 */
extern int rs400_init(struct radeon_device *rdev);
extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
214 | static struct radeon_asic rs400_asic = { | |
ca6ffc64 JG |
215 | .init = &rs400_init, |
216 | .fini = &rs400_fini, | |
217 | .suspend = &rs400_suspend, | |
218 | .resume = &rs400_resume, | |
771fe6b9 | 219 | .gpu_reset = &r300_gpu_reset, |
771fe6b9 JG |
220 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
221 | .gart_set_page = &rs400_gart_set_page, | |
3ce0a23d | 222 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 223 | .ring_start = &r300_ring_start, |
3ce0a23d JG |
224 | .ring_test = &r100_ring_test, |
225 | .ring_ib_execute = &r100_ring_ib_execute, | |
771fe6b9 JG |
226 | .irq_set = &r100_irq_set, |
227 | .irq_process = &r100_irq_process, | |
7ed220d7 | 228 | .get_vblank_counter = &r100_get_vblank_counter, |
771fe6b9 JG |
229 | .fence_ring_emit = &r300_fence_ring_emit, |
230 | .cs_parse = &r300_cs_parse, | |
231 | .copy_blit = &r100_copy_blit, | |
232 | .copy_dma = &r300_copy_dma, | |
233 | .copy = &r100_copy_blit, | |
7433874e | 234 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
771fe6b9 | 235 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
7433874e | 236 | .get_memory_clock = NULL, |
771fe6b9 JG |
237 | .set_memory_clock = NULL, |
238 | .set_pcie_lanes = NULL, | |
239 | .set_clock_gating = &radeon_legacy_set_clock_gating, | |
e024e110 DA |
240 | .set_surface_reg = r100_set_surface_reg, |
241 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 242 | .bandwidth_update = &r100_bandwidth_update, |
771fe6b9 JG |
243 | }; |


/*
 * rs600.
 *
 * rs600 backend: own IRQ/vblank handling, GART and MC accessors,
 * and a dedicated bandwidth_update implementation.
 */
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
/*
 * ASIC dispatch table for rs600: rs600-specific GART and IRQ/vblank
 * routines over the shared r100/r300 engine handlers, with AtomBIOS
 * clock control.
 *
 * NOTE(review): unlike the sibling tables, .set_surface_reg and
 * .clear_surface_reg are not initialized here (implicitly NULL) —
 * confirm whether that is intentional for this family or an omission.
 */
static struct radeon_asic rs600_asic = {
	.init = &rs600_init,
	.fini = &rs600_fini,
	.suspend = &rs600_suspend,
	.resume = &rs600_resume,
	.gpu_reset = &r300_gpu_reset,
	.gart_tlb_flush = &rs600_gart_tlb_flush,
	.gart_set_page = &rs600_gart_set_page,
	.cp_commit = &r100_cp_commit,
	.ring_start = &r300_ring_start,
	.ring_test = &r100_ring_test,
	.ring_ib_execute = &r100_ring_ib_execute,
	.irq_set = &rs600_irq_set,
	.irq_process = &rs600_irq_process,
	.get_vblank_counter = &rs600_get_vblank_counter,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.get_engine_clock = &radeon_atom_get_engine_clock,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.get_memory_clock = &radeon_atom_get_memory_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
	.bandwidth_update = &rs600_bandwidth_update,
};


/*
 * rs690,rs740
 *
 * rs690 backend: own lifecycle, MC accessors and bandwidth handling;
 * the rest of its table is borrowed from rs400/rs600/r300.
 */
int rs690_init(struct radeon_device *rdev);
void rs690_fini(struct radeon_device *rdev);
int rs690_resume(struct radeon_device *rdev);
int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
771fe6b9 | 301 | static struct radeon_asic rs690_asic = { |
3bc68535 JG |
302 | .init = &rs690_init, |
303 | .fini = &rs690_fini, | |
304 | .suspend = &rs690_suspend, | |
305 | .resume = &rs690_resume, | |
771fe6b9 | 306 | .gpu_reset = &r300_gpu_reset, |
771fe6b9 JG |
307 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
308 | .gart_set_page = &rs400_gart_set_page, | |
3ce0a23d | 309 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 310 | .ring_start = &r300_ring_start, |
3ce0a23d JG |
311 | .ring_test = &r100_ring_test, |
312 | .ring_ib_execute = &r100_ring_ib_execute, | |
771fe6b9 | 313 | .irq_set = &rs600_irq_set, |
7ed220d7 MD |
314 | .irq_process = &rs600_irq_process, |
315 | .get_vblank_counter = &rs600_get_vblank_counter, | |
771fe6b9 JG |
316 | .fence_ring_emit = &r300_fence_ring_emit, |
317 | .cs_parse = &r300_cs_parse, | |
318 | .copy_blit = &r100_copy_blit, | |
319 | .copy_dma = &r300_copy_dma, | |
320 | .copy = &r300_copy_dma, | |
7433874e | 321 | .get_engine_clock = &radeon_atom_get_engine_clock, |
771fe6b9 | 322 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 323 | .get_memory_clock = &radeon_atom_get_memory_clock, |
771fe6b9 JG |
324 | .set_memory_clock = &radeon_atom_set_memory_clock, |
325 | .set_pcie_lanes = NULL, | |
326 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
e024e110 DA |
327 | .set_surface_reg = r100_set_surface_reg, |
328 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 329 | .bandwidth_update = &rs690_bandwidth_update, |
771fe6b9 JG |
330 | }; |


/*
 * rv515
 *
 * rv515 backend; its fini/suspend/resume and bandwidth handlers are
 * also reused by the r520 table below.
 */
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
771fe6b9 | 347 | static struct radeon_asic rv515_asic = { |
068a117c | 348 | .init = &rv515_init, |
d39c3b89 JG |
349 | .fini = &rv515_fini, |
350 | .suspend = &rv515_suspend, | |
351 | .resume = &rv515_resume, | |
771fe6b9 | 352 | .gpu_reset = &rv515_gpu_reset, |
771fe6b9 JG |
353 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
354 | .gart_set_page = &rv370_pcie_gart_set_page, | |
3ce0a23d | 355 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 356 | .ring_start = &rv515_ring_start, |
3ce0a23d JG |
357 | .ring_test = &r100_ring_test, |
358 | .ring_ib_execute = &r100_ring_ib_execute, | |
7ed220d7 MD |
359 | .irq_set = &rs600_irq_set, |
360 | .irq_process = &rs600_irq_process, | |
361 | .get_vblank_counter = &rs600_get_vblank_counter, | |
771fe6b9 | 362 | .fence_ring_emit = &r300_fence_ring_emit, |
068a117c | 363 | .cs_parse = &r300_cs_parse, |
771fe6b9 JG |
364 | .copy_blit = &r100_copy_blit, |
365 | .copy_dma = &r300_copy_dma, | |
366 | .copy = &r100_copy_blit, | |
7433874e | 367 | .get_engine_clock = &radeon_atom_get_engine_clock, |
771fe6b9 | 368 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 369 | .get_memory_clock = &radeon_atom_get_memory_clock, |
771fe6b9 JG |
370 | .set_memory_clock = &radeon_atom_set_memory_clock, |
371 | .set_pcie_lanes = &rv370_set_pcie_lanes, | |
372 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
e024e110 DA |
373 | .set_surface_reg = r100_set_surface_reg, |
374 | .clear_surface_reg = r100_clear_surface_reg, | |
c93bb85b | 375 | .bandwidth_update = &rv515_bandwidth_update, |
771fe6b9 JG |
376 | }; |


/*
 * r520,rv530,rv560,rv570,r580
 *
 * r520 only overrides init/resume; the rest of its table reuses the
 * rv515 handlers declared above.
 */
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
771fe6b9 | 384 | static struct radeon_asic r520_asic = { |
d39c3b89 | 385 | .init = &r520_init, |
f0ed1f65 JG |
386 | .fini = &rv515_fini, |
387 | .suspend = &rv515_suspend, | |
388 | .resume = &r520_resume, | |
771fe6b9 | 389 | .gpu_reset = &rv515_gpu_reset, |
771fe6b9 JG |
390 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
391 | .gart_set_page = &rv370_pcie_gart_set_page, | |
3ce0a23d | 392 | .cp_commit = &r100_cp_commit, |
771fe6b9 | 393 | .ring_start = &rv515_ring_start, |
3ce0a23d JG |
394 | .ring_test = &r100_ring_test, |
395 | .ring_ib_execute = &r100_ring_ib_execute, | |
7ed220d7 MD |
396 | .irq_set = &rs600_irq_set, |
397 | .irq_process = &rs600_irq_process, | |
398 | .get_vblank_counter = &rs600_get_vblank_counter, | |
771fe6b9 | 399 | .fence_ring_emit = &r300_fence_ring_emit, |
068a117c | 400 | .cs_parse = &r300_cs_parse, |
771fe6b9 JG |
401 | .copy_blit = &r100_copy_blit, |
402 | .copy_dma = &r300_copy_dma, | |
403 | .copy = &r100_copy_blit, | |
7433874e | 404 | .get_engine_clock = &radeon_atom_get_engine_clock, |
771fe6b9 | 405 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 406 | .get_memory_clock = &radeon_atom_get_memory_clock, |
771fe6b9 JG |
407 | .set_memory_clock = &radeon_atom_set_memory_clock, |
408 | .set_pcie_lanes = &rv370_set_pcie_lanes, | |
409 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
e024e110 DA |
410 | .set_surface_reg = r100_set_surface_reg, |
411 | .clear_surface_reg = r100_clear_surface_reg, | |
f0ed1f65 | 412 | .bandwidth_update = &rv515_bandwidth_update, |
771fe6b9 JG |
413 | }; |

/*
 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
 *
 * r600 backend: full new set of engine, IRQ, GART, surface-reg and
 * copy entry points used by the r600 and rv770 tables.
 */
int r600_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
int r600_suspend(struct radeon_device *rdev);
int r600_resume(struct radeon_device *rdev);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
int r600_gpu_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size);
int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence);

449 | static struct radeon_asic r600_asic = { | |
3ce0a23d JG |
450 | .init = &r600_init, |
451 | .fini = &r600_fini, | |
452 | .suspend = &r600_suspend, | |
453 | .resume = &r600_resume, | |
454 | .cp_commit = &r600_cp_commit, | |
3ce0a23d | 455 | .gpu_reset = &r600_gpu_reset, |
3ce0a23d JG |
456 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
457 | .gart_set_page = &rs600_gart_set_page, | |
3ce0a23d JG |
458 | .ring_test = &r600_ring_test, |
459 | .ring_ib_execute = &r600_ring_ib_execute, | |
3ce0a23d JG |
460 | .irq_set = &r600_irq_set, |
461 | .irq_process = &r600_irq_process, | |
462 | .fence_ring_emit = &r600_fence_ring_emit, | |
463 | .cs_parse = &r600_cs_parse, | |
464 | .copy_blit = &r600_copy_blit, | |
465 | .copy_dma = &r600_copy_blit, | |
a3812877 | 466 | .copy = &r600_copy_blit, |
7433874e | 467 | .get_engine_clock = &radeon_atom_get_engine_clock, |
3ce0a23d | 468 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 469 | .get_memory_clock = &radeon_atom_get_memory_clock, |
3ce0a23d JG |
470 | .set_memory_clock = &radeon_atom_set_memory_clock, |
471 | .set_pcie_lanes = NULL, | |
472 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
473 | .set_surface_reg = r600_set_surface_reg, | |
474 | .clear_surface_reg = r600_clear_surface_reg, | |
f0ed1f65 | 475 | .bandwidth_update = &rv515_bandwidth_update, |
3ce0a23d JG |
476 | }; |

/*
 * rv770,rv730,rv710,rv740
 *
 * rv770 overrides lifecycle and GPU reset; the rest of its table
 * reuses the r600 handlers declared above.
 */
int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);

487 | static struct radeon_asic rv770_asic = { | |
3ce0a23d JG |
488 | .init = &rv770_init, |
489 | .fini = &rv770_fini, | |
490 | .suspend = &rv770_suspend, | |
491 | .resume = &rv770_resume, | |
492 | .cp_commit = &r600_cp_commit, | |
3ce0a23d | 493 | .gpu_reset = &rv770_gpu_reset, |
3ce0a23d JG |
494 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
495 | .gart_set_page = &rs600_gart_set_page, | |
3ce0a23d JG |
496 | .ring_test = &r600_ring_test, |
497 | .ring_ib_execute = &r600_ring_ib_execute, | |
3ce0a23d JG |
498 | .irq_set = &r600_irq_set, |
499 | .irq_process = &r600_irq_process, | |
500 | .fence_ring_emit = &r600_fence_ring_emit, | |
501 | .cs_parse = &r600_cs_parse, | |
502 | .copy_blit = &r600_copy_blit, | |
503 | .copy_dma = &r600_copy_blit, | |
a3812877 | 504 | .copy = &r600_copy_blit, |
7433874e | 505 | .get_engine_clock = &radeon_atom_get_engine_clock, |
3ce0a23d | 506 | .set_engine_clock = &radeon_atom_set_engine_clock, |
7433874e | 507 | .get_memory_clock = &radeon_atom_get_memory_clock, |
3ce0a23d JG |
508 | .set_memory_clock = &radeon_atom_set_memory_clock, |
509 | .set_pcie_lanes = NULL, | |
510 | .set_clock_gating = &radeon_atom_set_clock_gating, | |
511 | .set_surface_reg = r600_set_surface_reg, | |
512 | .clear_surface_reg = r600_clear_surface_reg, | |
f0ed1f65 | 513 | .bandwidth_update = &rv515_bandwidth_update, |
3ce0a23d | 514 | }; |
771fe6b9 JG |
515 | |
516 | #endif |