/*
 * Thunderbolt Cactus Ridge driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

/**
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

/**
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};

/**
 * tb_eeprom_active - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	if (enable) {
		ctl.access_high = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_low = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.access_low = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_high = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}

/**
 * tb_eeprom_transfer - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
 * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
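	/*
	 * Pulse the clock: data_out is driven before the rising edge and,
	 * for reads, data_in is sampled while the clock is high.
	 */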
	ctl->clock = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}

/**
 * tb_eeprom_out - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	for (i = 0; i < 8; i++) {
		ctl.data_out = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}

/**
 * tb_eeprom_in - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
	for (i = 0; i < 8; i++) {
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.data_in;
	}
	return 0;
}

/**
 * tb_eeprom_read_n - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	int i, res;
	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
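	/*
	 * Presumably the standard SPI serial EEPROM READ opcode (0x03),
	 * followed by a 16-bit big-endian address.
	 */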
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}

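/* CRC-8 over the DROM uid: MSB first, polynomial 0x07, initial value 0xff */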
static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}

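/* CRC-32C (Castagnoli) with the conventional ~0 seed and final inversion */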
static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}

#define TB_DROM_DATA_START 13
struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8; /* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision; /* should be <= 1 */
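	/* BYTES 14-15: 10-bit data_len plus 6 unknown bits */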
	u16 data_len:10;
	u8 __unknown1:6;
	/* BYTES 16-21 */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;

enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_generic {
	struct tb_drom_entry_header header;
	u8 data[0];
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4 - 5 TODO decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;


/**
 * tb_eeprom_get_drom_offset - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;
	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENOSYS;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			 sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENOSYS;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
			   cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

/**
 * tb_drom_read_uid_only - read uid directly from drom
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u16 drom_offset;
	u8 crc;
	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	if (drom_offset == 0)
		return -ENODEV;

	/* read uid */
	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
			   data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data+1);
	return 0;
}

static int tb_drom_parse_entry_generic(struct tb_switch *sw,
		struct tb_drom_entry_header *header)
{
	const struct tb_drom_entry_generic *entry =
		(const struct tb_drom_entry_generic *)header;

	switch (header->index) {
	case 1:
		/* Length includes 2 bytes header so remove it before copy */
		sw->vendor_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->vendor_name)
			return -ENOMEM;
		break;

	case 2:
		sw->device_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->device_name)
			return -ENOMEM;
		break;
	}

	return 0;
}

static int tb_drom_parse_entry_port(struct tb_switch *sw,
				    struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		port->link_nr = entry->link_nr;
		if (entry->has_dual_link_port)
			port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
	}
	return 0;
}

/**
 * tb_drom_parse_entries - parse the linked list of drom entries
 *
 * Drom must have been copied to sw->drom.
 */
static int tb_drom_parse_entries(struct tb_switch *sw)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = sizeof(*header);
	u16 drom_size = header->data_len + TB_DROM_DATA_START;
	int res;

	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
			return -EIO;
		}

		switch (entry->type) {
		case TB_DROM_ENTRY_GENERIC:
			res = tb_drom_parse_entry_generic(sw, entry);
			break;
		case TB_DROM_ENTRY_PORT:
			res = tb_drom_parse_entry_port(sw, entry);
			break;
		}
		if (res)
			return res;

		pos += entry->len;
	}
	return 0;
}

/**
 * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
 */
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
	struct device *dev = &sw->tb->nhi->pdev->dev;
	int len, res;

	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;

	sw->drom = kmalloc(len, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
					    len);
	if (res)
		goto err;

	*size = ((struct tb_drom_header *)sw->drom)->data_len +
		TB_DROM_DATA_START;
	if (*size > len)
		goto err;

	return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EINVAL;
}

static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

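	/* the length field lives at bytes 14-15 of the DROM header */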
	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}

/**
 * tb_drom_read - copy drom to sw->drom and parse it
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 drom_offset;
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res;
	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * Apple's NHI EFI driver supplies a DROM for the root switch
		 * in a device property. Use it if available.
		 */
		if (tb_drom_copy_efi(sw, &size) == 0)
			goto parse;

		/* Non-Apple hardware has the DROM as part of NVM */
		if (tb_drom_copy_nvm(sw, &size) == 0)
			goto parse;

		/*
		 * The root switch contains only a dummy drom (header only,
		 * no entries). Hardcode the configuration here.
		 */
		tb_drom_read_uid_only(sw, &sw->uid);

		sw->ports[1].link_nr = 0;
		sw->ports[2].link_nr = 1;
		sw->ports[1].dual_link_port = &sw->ports[2];
		sw->ports[2].dual_link_port = &sw->ports[1];

		sw->ports[3].link_nr = 0;
		sw->ports[4].link_nr = 1;
		sw->ports[3].dual_link_port = &sw->ports[4];
		sw->ports[4].dual_link_port = &sw->ports[3];

		/* Port 5 is inaccessible on this gen 1 controller */
		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
			sw->ports[5].disabled = true;

		return 0;
	}

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

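	/* bytes 14-15 of the header hold the 10-bit data_len field */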
	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_info(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
	if (res)
		goto err;

parse:
	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch, aborting\n");
		goto err;
	}

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
			header->uid_crc8, crc);
		goto err;
	}
	if (!sw->uid)
		sw->uid = header->uid;
	sw->vendor = header->vendor_id;
	sw->device = header->model_id;

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
			header->data_crc32, crc);
	}

	if (header->device_rom_revision > 2)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			   header->device_rom_revision);

	return tb_drom_parse_entries(sw);
err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}