/*
 * Intel PCH/PCU SPI flash driver.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/platform_data/intel-spi.h>

#include "intel-spi.h"

/* Offsets are from @ispi->base */
#define BFPREG	0x00

#define HSFSTS_CTL	0x04
#define HSFSTS_CTL_FSMIE	BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT	24
#define HSFSTS_CTL_FDBC_MASK	(0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT	17
#define HSFSTS_CTL_FCYCLE_MASK	(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ	(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE	(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE	(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID	(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR	(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR	(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO	BIT(16)
#define HSFSTS_CTL_FLOCKDN	BIT(15)
#define HSFSTS_CTL_FDV	BIT(14)
#define HSFSTS_CTL_SCIP	BIT(5)
#define HSFSTS_CTL_AEL	BIT(2)
#define HSFSTS_CTL_FCERR	BIT(1)
#define HSFSTS_CTL_FDONE	BIT(0)

#define FADDR	0x08
#define DLOCK	0x0c
#define FDATA(n)	(0x10 + ((n) * 4))

#define FRACC	0x50

#define FREG(n)	(0x54 + ((n) * 4))
#define FREG_BASE_MASK	0x3fff
#define FREG_LIMIT_SHIFT	16
#define FREG_LIMIT_MASK	(0x03fff << FREG_LIMIT_SHIFT)

/* Offset is from @ispi->pregs */
#define PR(n)	((n) * 4)
#define PR_WPE	BIT(31)
#define PR_LIMIT_SHIFT	16
#define PR_LIMIT_MASK	(0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE	BIT(15)
#define PR_BASE_MASK	0x3fff
/* Last PR is GPR0 */
#define PR_NUM	(5 + 1)

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL	0x00
#define SSFSTS_CTL_FSMIE	BIT(23)
#define SSFSTS_CTL_DS	BIT(22)
#define SSFSTS_CTL_DBC_SHIFT	16
#define SSFSTS_CTL_SPOP	BIT(11)
#define SSFSTS_CTL_ACS	BIT(10)
#define SSFSTS_CTL_SCGO	BIT(9)
#define SSFSTS_CTL_COP_SHIFT	12
#define SSFSTS_CTL_FRS	BIT(7)
#define SSFSTS_CTL_DOFRS	BIT(6)
#define SSFSTS_CTL_AEL	BIT(4)
#define SSFSTS_CTL_FCERR	BIT(3)
#define SSFSTS_CTL_FDONE	BIT(2)
#define SSFSTS_CTL_SCIP	BIT(0)

#define PREOP_OPTYPE	0x04
#define OPMENU0	0x08
#define OPMENU1	0x0c

/* CPU specifics */
#define BYT_PR	0x74
#define BYT_SSFSTS_CTL	0x90
#define BYT_BCR	0xfc
#define BYT_BCR_WPD	BIT(0)
#define BYT_FREG_NUM	5

#define LPT_PR	0x74
#define LPT_SSFSTS_CTL	0x90
#define LPT_FREG_NUM	5

#define BXT_PR	0x84
#define BXT_SSFSTS_CTL	0xa0
#define BXT_FREG_NUM	12

#define INTEL_SPI_TIMEOUT	5000 /* ms */
#define INTEL_SPI_FIFO_SZ	64

/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @nor: SPI NOR layer structure
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @nregions: Maximum number of regions
 * @writeable: Is the chip writeable
 * @swseq: Use SW sequencer in register reads/writes
 * @erase_64k: 64k erase supported
 * @opcodes: Opcodes which are supported. These are programmed by the BIOS
 *           before it locks down the controller.
 * @preopcodes: Preopcodes which are supported.
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	struct spi_nor nor;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	size_t nregions;
	bool writeable;
	bool swseq;
	bool erase_64k;
	u8 opcodes[8];
	u8 preopcodes[2];
};

static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");

static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < PR_NUM; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	value = readl(ispi->sregs + SSFSTS_CTL);
	dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
	dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
		readl(ispi->sregs + PREOP_OPTYPE));
	dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
	dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));

	if (ispi->info->type == INTEL_SPI_BYT)
		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < PR_NUM; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			i, base << 12, (limit << 12) | 0xfff,
			value & PR_WPE ? 'W' : '.',
			value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq ? 'S' : 'H');
}

/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
				 size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_toio(ispi->base + FDATA(i), buf, bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

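/* Poll HSFSTS_CTL until the hardware sequencer cycle completes (SCIP clears) */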
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}

static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}

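/*
 * Set up the per-SoC register offsets, optionally lift BIOS write
 * protection, disable #SMI generation and read back the opcodes the BIOS
 * programmed into the software sequencer before locking the controller.
 */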
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, val;
	int i;

	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;

		if (writeable) {
			/* Disable write protection */
			val = readl(ispi->base + BYT_BCR);
			if (!(val & BYT_BCR_WPD)) {
				val |= BYT_BCR_WPD;
				writel(val, ispi->base + BYT_BCR);
				val = readl(ispi->base + BYT_BCR);
			}

			ispi->writeable = !!(val & BYT_BCR_WPD);
		}

		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->erase_64k = true;
		break;

	default:
		return -EINVAL;
	}

	/* Disable #SMI generation */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * BIOS programs allowed opcodes and then locks down the register.
	 * So read back what opcodes it decided to support. That's the set
	 * we are going to support as well.
	 */
	opmenu0 = readl(ispi->sregs + OPMENU0);
	opmenu1 = readl(ispi->sregs + OPMENU1);

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer. If we find that BIOS has programmed
	 * opcodes for the software sequencer we use that over the hardware
	 * sequencer.
	 */
	if (opmenu0 && opmenu1) {
		for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
			ispi->opcodes[i] = opmenu0 >> i * 8;
			ispi->opcodes[i + 4] = opmenu1 >> i * 8;
		}

		val = readl(ispi->sregs + PREOP_OPTYPE);
		ispi->preopcodes[0] = val;
		ispi->preopcodes[1] = val >> 8;

		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);

		ispi->swseq = true;
	}

	intel_spi_dump_regs(ispi);

	return 0;
}

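/* Return the index of @opcode in the BIOS-programmed opcode menu, or -EINVAL */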
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
		if (ispi->opcodes[i] == opcode)
			return i;
	return -EINVAL;
}

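/*
 * Run a single register cycle (RDID/RDSR/WRSR) on the hardware sequencer
 * and wait for it to complete. FADDR and the data FIFO are set up by the
 * caller.
 */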
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
			      int len)
{
	u32 val, status;
	int ret;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

	switch (opcode) {
	case SPINOR_OP_RDID:
		val |= HSFSTS_CTL_FCYCLE_RDID;
		break;
	case SPINOR_OP_WRSR:
		val |= HSFSTS_CTL_FCYCLE_WRSR;
		break;
	case SPINOR_OP_RDSR:
		val |= HSFSTS_CTL_FCYCLE_RDSR;
		break;
	default:
		return -EINVAL;
	}

	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

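/*
 * Run a register cycle on the software sequencer using the opcode menu
 * slot matching @opcode, then wait for completion and check for flash
 * cycle and access errors.
 */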
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
			      int len)
{
	u32 val, status;
	int ret;

	ret = intel_spi_opcode_index(ispi, opcode);
	if (ret < 0)
		return ret;

	val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	/* SSFSTS_CTL is an offset from @ispi->sregs, not @ispi->base */
	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

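/* spi_nor read_reg hook: run a register read cycle and copy out the result */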
static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/* Address of the first chip */
	writel(0, ispi->base + FADDR);

	if (ispi->swseq)
		ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
	else
		ret = intel_spi_hw_cycle(ispi, opcode, buf, len);

	if (ret)
		return ret;

	return intel_spi_read_block(ispi, buf, len);
}

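/* spi_nor write_reg hook: fill the FIFO and run a register write cycle */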
static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/*
	 * The Intel controller handles write enable with an atomic
	 * operation and preopcode, so skip the command here.
	 */
	if (opcode == SPINOR_OP_WREN)
		return 0;

	writel(0, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, buf, len);
	if (ret)
		return ret;

	if (ispi->swseq)
		return intel_spi_sw_cycle(ispi, opcode, buf, len);
	return intel_spi_hw_cycle(ispi, opcode, buf, len);
}

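/*
 * spi_nor read hook: read @len bytes starting at @from using the hardware
 * sequencer, INTEL_SPI_FIFO_SZ bytes per cycle.
 */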
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
			      u_char *read_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	switch (nor->read_opcode) {
	case SPINOR_OP_READ:
	case SPINOR_OP_READ_FAST:
		break;
	default:
		return -EINVAL;
	}

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		writel(from, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %llx: %#x\n", from,
				status);
			return ret;
		}

		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		len -= block_size;
		from += block_size;
		retlen += block_size;
		read_buf += block_size;
	}

	return retlen;
}

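/*
 * spi_nor write hook: program @len bytes at @to using the hardware
 * sequencer, INTEL_SPI_FIFO_SZ bytes per cycle.
 */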
static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
			       const u_char *write_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		writel(to, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* Write enable */
		if (ispi->preopcodes[1] == SPINOR_OP_WREN)
			val |= SSFSTS_CTL_SPOP;
		val |= SSFSTS_CTL_ACS;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val = readl(ispi->base + HSFSTS_CTL);
		writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %llx: %#x\n", to,
				status);
			return ret;
		}

		len -= block_size;
		to += block_size;
		retlen += block_size;
		write_buf += block_size;
	}

	return retlen;
}

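/* spi_nor erase hook: erase one erase block at @offs, 64k or 4k at a time */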
static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
{
	size_t erase_size, len = nor->mtd.erasesize;
	struct intel_spi *ispi = nor->priv;
	u32 val, status, cmd;
	int ret;

	/* If the hardware can do 64k erase use that when possible */
	if (len >= SZ_64K && ispi->erase_64k) {
		cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
		erase_size = SZ_64K;
	} else {
		cmd = HSFSTS_CTL_FCYCLE_ERASE;
		erase_size = SZ_4K;
	}

	while (len > 0) {
		writel(offs, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= cmd;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			return -EIO;
		else if (status & HSFSTS_CTL_AEL)
			return -EACCES;

		offs += erase_size;
		len -= erase_size;
	}

	return 0;
}

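/*
 * Return true if any enabled protected range lies within the flash region
 * [@base, @limit] (both in 4k units).
 */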
static bool intel_spi_is_protected(const struct intel_spi *ispi,
				   unsigned int base, unsigned int limit)
{
	int i;

	for (i = 0; i < PR_NUM; i++) {
		u32 pr_base, pr_limit, pr_value;

		pr_value = readl(ispi->pregs + PR(i));
		if (!(pr_value & (PR_WPE | PR_RPE)))
			continue;

		pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		pr_base = pr_value & PR_BASE_MASK;

		if (pr_base >= base && pr_limit <= limit)
			return true;
	}

	return false;
}

/*
 * There will be a single partition holding all enabled flash regions. We
 * call this "BIOS".
 */
static void intel_spi_fill_partition(struct intel_spi *ispi,
				     struct mtd_partition *part)
{
	u64 end;
	int i;

	memset(part, 0, sizeof(*part));

	/* Start from the mandatory descriptor region */
	part->size = 4096;
	part->name = "BIOS";

	/*
	 * Now try to find where this partition ends based on the flash
	 * region registers.
	 */
	for (i = 1; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || limit == 0)
			continue;

		/*
		 * If any of the regions have protection bits set, make the
		 * whole partition read-only to be on the safe side.
		 */
		if (intel_spi_is_protected(ispi, base, limit))
			ispi->writeable = false;

		end = (limit << 12) + 4096;
		if (end > part->size)
			part->size = end;
	}
}

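/**
 * intel_spi_probe() - Probe the Intel SPI flash controller
 * @dev: Pointer to the parent device
 * @mem: MMIO resource of the controller
 * @info: Pointer to the board specific information
 *
 * Initializes the controller, scans for the attached SPI NOR chip and
 * registers it as a single "BIOS" MTD partition. Returns the driver
 * private data or an ERR_PTR() on failure.
 */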
struct intel_spi *intel_spi_probe(struct device *dev,
	struct resource *mem, const struct intel_spi_boardinfo *info)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	struct mtd_partition part;
	struct intel_spi *ispi;
	int ret;

	if (!info || !mem)
		return ERR_PTR(-EINVAL);

	ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
	if (!ispi)
		return ERR_PTR(-ENOMEM);

	ispi->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ispi->base))
		return ERR_CAST(ispi->base);

	ispi->dev = dev;
	ispi->info = info;
	ispi->writeable = info->writeable;

	ret = intel_spi_init(ispi);
	if (ret)
		return ERR_PTR(ret);

	ispi->nor.dev = ispi->dev;
	ispi->nor.priv = ispi;
	ispi->nor.read_reg = intel_spi_read_reg;
	ispi->nor.write_reg = intel_spi_write_reg;
	ispi->nor.read = intel_spi_read;
	ispi->nor.write = intel_spi_write;
	ispi->nor.erase = intel_spi_erase;

	ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
	if (ret) {
		dev_info(dev, "failed to locate the chip\n");
		return ERR_PTR(ret);
	}

	intel_spi_fill_partition(ispi, &part);

	/* Prevent writes if not explicitly enabled */
	if (!ispi->writeable || !writeable)
		ispi->nor.mtd.flags &= ~MTD_WRITEABLE;

	ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
	if (ret)
		return ERR_PTR(ret);

	return ispi;
}
EXPORT_SYMBOL_GPL(intel_spi_probe);

int intel_spi_remove(struct intel_spi *ispi)
{
	return mtd_device_unregister(&ispi->nor.mtd);
}
EXPORT_SYMBOL_GPL(intel_spi_remove);

MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");