]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | |
2 | /* Common Flash Interface structures | |
3 | * See http://support.intel.com/design/flash/technote/index.htm | |
4 | * $Id: cfi.h,v 1.50 2004/11/20 12:46:51 dwmw2 Exp $ | |
5 | */ | |
6 | ||
7 | #ifndef __MTD_CFI_H__ | |
8 | #define __MTD_CFI_H__ | |
9 | ||
10 | #include <linux/config.h> | |
11 | #include <linux/version.h> | |
12 | #include <linux/delay.h> | |
13 | #include <linux/types.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/mtd/flashchip.h> | |
16 | #include <linux/mtd/map.h> | |
17 | #include <linux/mtd/cfi_endian.h> | |
18 | ||
/*
 * cfi_interleave(cfi) evaluates to the number of flash chips interleaved
 * side-by-side on the bus.  When exactly one of CONFIG_MTD_CFI_I{1,2,4,8}
 * is enabled, it collapses to that compile-time constant so the compiler
 * can discard dead code for the other geometries.  When more than one is
 * enabled, each later block #undefs the constant form and redefines it to
 * read the runtime value (cfi)->interleave.
 *
 * cfi_interleave_is_N(cfi) is a constant 0 whenever CONFIG_MTD_CFI_IN is
 * disabled, so whole branches testing it can be optimised away.
 */
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif
61 | ||
/*
 * Return 1 if interleave factor 'i' was compiled in via one of the
 * CONFIG_MTD_CFI_I{1,2,4,8} options, 0 otherwise.
 */
static inline int cfi_interleave_supported(int i)
{
#ifdef CONFIG_MTD_CFI_I1
	if (i == 1)
		return 1;
#endif
#ifdef CONFIG_MTD_CFI_I2
	if (i == 2)
		return 1;
#endif
#ifdef CONFIG_MTD_CFI_I4
	if (i == 4)
		return 1;
#endif
#ifdef CONFIG_MTD_CFI_I8
	if (i == 8)
		return 1;
#endif
	return 0;
}
83 | ||
84 | ||
/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). Eg. a 32-bit device is 4 bytes (4 x 8 bits).
 * These numbers are used directly in address/size calculations.
 */
#define CFI_DEVICETYPE_X8 (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
93 | ||
94 | /* NB: We keep these structures in memory in HOST byteorder, except | |
95 | * where individually noted. | |
96 | */ | |
97 | ||
98 | /* Basic Query Structure */ | |
99 | struct cfi_ident { | |
100 | uint8_t qry[3]; | |
101 | uint16_t P_ID; | |
102 | uint16_t P_ADR; | |
103 | uint16_t A_ID; | |
104 | uint16_t A_ADR; | |
105 | uint8_t VccMin; | |
106 | uint8_t VccMax; | |
107 | uint8_t VppMin; | |
108 | uint8_t VppMax; | |
109 | uint8_t WordWriteTimeoutTyp; | |
110 | uint8_t BufWriteTimeoutTyp; | |
111 | uint8_t BlockEraseTimeoutTyp; | |
112 | uint8_t ChipEraseTimeoutTyp; | |
113 | uint8_t WordWriteTimeoutMax; | |
114 | uint8_t BufWriteTimeoutMax; | |
115 | uint8_t BlockEraseTimeoutMax; | |
116 | uint8_t ChipEraseTimeoutMax; | |
117 | uint8_t DevSize; | |
118 | uint16_t InterfaceDesc; | |
119 | uint16_t MaxBufWriteSize; | |
120 | uint8_t NumEraseRegions; | |
121 | uint32_t EraseRegionInfo[0]; /* Not host ordered */ | |
122 | } __attribute__((packed)); | |
123 | ||
/* Extended Query Structure for both PRI and ALT */

/* Common header shared by the primary and alternate extended query
 * tables: a 3-byte identification string plus a two-byte version. */
struct cfi_extquery {
	uint8_t pri[3];		/* identification string ("PRI"/"ALT" — TODO confirm against probe code) */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
} __attribute__((packed));
131 | ||
132 | /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ | |
133 | ||
134 | struct cfi_pri_intelext { | |
135 | uint8_t pri[3]; | |
136 | uint8_t MajorVersion; | |
137 | uint8_t MinorVersion; | |
138 | uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature | |
139 | block follows - FIXME - not currently supported */ | |
140 | uint8_t SuspendCmdSupport; | |
141 | uint16_t BlkStatusRegMask; | |
142 | uint8_t VccOptimal; | |
143 | uint8_t VppOptimal; | |
144 | uint8_t NumProtectionFields; | |
145 | uint16_t ProtRegAddr; | |
146 | uint8_t FactProtRegSize; | |
147 | uint8_t UserProtRegSize; | |
148 | uint8_t extra[0]; | |
149 | } __attribute__((packed)); | |
150 | ||
/* One erase-block-type descriptor within an Intel partition region. */
struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;	/* number of identical blocks of this type */
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t BitsPerCell;
	uint8_t BlockCap;
} __attribute__((packed));
158 | ||
/* Intel partition-region descriptor: per-region operation limits plus
 * a variable-length table of block types.
 *
 * NOTE(review): BlockTypes is a variable-length trailing array kept as
 * [1] (not a flexible array member) — sizeof this struct then accounts
 * for one entry, which the parsing code may rely on; confirm before
 * changing. */
struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t NumOpAllowed;
	uint8_t NumOpAllowedSimProgMode;
	uint8_t NumOpAllowedSimEraMode;
	uint8_t NumBlockTypes;		/* number of valid BlockTypes entries */
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));
167 | ||
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

/* Packed on-flash image of the AMD/Fujitsu extended query table. */
struct cfi_pri_amdstd {
	uint8_t pri[3];
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t EraseSuspend;
	uint8_t BlkProt;
	uint8_t TmpBlkUnprotect;
	uint8_t BlkProtUnprot;
	uint8_t SimultaneousOps;
	uint8_t BurstMode;
	uint8_t PageMode;
	uint8_t VppMin;
	uint8_t VppMax;
	uint8_t TopBottom;	/* boot-block location indicator — TODO confirm encoding */
} __attribute__((packed));
186 | ||
/* Protection-register query: NumFields entries follow as raw words. */
struct cfi_pri_query {
	uint8_t NumFields;	/* number of valid ProtField entries */
	uint32_t ProtField[1];	/* Not host ordered; variable length */
} __attribute__((packed));
191 | ||
/* Burst-read-interface query: capability byte plus raw config words. */
struct cfi_bri_query {
	uint8_t PageModeReadCap;
	uint8_t NumFields;	/* number of valid ConfField entries */
	uint32_t ConfField[1];	/* Not host ordered; variable length */
} __attribute__((packed));
197 | ||
/* Vendor command-set IDs, as reported in the P_ID/A_ID fields of
 * struct cfi_ident above. */
#define P_ID_NONE 0x0000
#define P_ID_INTEL_EXT 0x0001
#define P_ID_AMD_STD 0x0002
#define P_ID_INTEL_STD 0x0003
#define P_ID_AMD_EXT 0x0004
#define P_ID_WINBOND 0x0006
#define P_ID_ST_ADV 0x0020
#define P_ID_MITSUBISHI_STD 0x0100
#define P_ID_MITSUBISHI_EXT 0x0101
#define P_ID_SST_PAGE 0x0102
#define P_ID_INTEL_PERFORMANCE 0x0200
#define P_ID_INTEL_DATA 0x0210
#define P_ID_RESERVED 0xffff


/* How the chip was identified, stored in cfi_private.cfi_mode. */
#define CFI_MODE_CFI 1
#define CFI_MODE_JEDEC 0
215 | ||
/* Per-map private state shared by the CFI chip drivers.  Allocated with
 * room for 'numchips' trailing struct flchip entries. */
struct cfi_private {
	uint16_t cmdset;	/* active command-set ID (P_ID_* value) */
	void *cmdset_priv;	/* command-set specific private data */
	int interleave;		/* number of chips interleaved on the bus */
	int device_type;	/* device width in bytes (CFI_DEVICETYPE_*) */
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;	/* unlock-cycle addresses — presumably for the
				   AMD/JEDEC command sequence; confirm in the
				   cmdset drivers */
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				  must be of the same type. */
	int mfr, id;		/* JEDEC manufacturer/device IDs */
	int numchips;		/* number of entries in chips[] */
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];	/* per-chip data structure for each chip */
};
233 | ||
234 | /* | |
235 | * Returns the command address according to the given geometry. | |
236 | */ | |
237 | static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type) | |
238 | { | |
239 | return (cmd_ofs * type) * interleave; | |
240 | } | |
241 | ||
/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 *
 * Builds the map_word to put on the bus so that every interleaved chip
 * sees 'cmd' in its own device-width lane.
 */
static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* Bytes each individual chip drives per bus cycle. */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	/* How many chips' lanes fit into one unsigned long. */
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate.  Each case deliberately
	   falls through to the next, doubling the replicated pattern. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
/* Shorthand for chip drivers: build 'x' for the local map/cfi geometry. */
#define CMD(x) cfi_build_cmd((x), map, cfi)
309 | ||
/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	/* Scale the nominal command offset by device width and interleave. */
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	/* Replicate the command across every interleaved chip's lane. */
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
333 | ||
/*
 * Read one byte of CFI query data at 'addr', sampling the first chip on
 * the bus and converting from CFI (flash) byte order to host order as
 * appropriate for the bus width.
 */
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}
349 | ||
/*
 * Wait approximately 'us' microseconds.  Short delays busy-spin and
 * then voluntarily yield the CPU; delays of a millisecond or more
 * sleep instead, rounding up so we never wait less than requested.
 */
static inline void cfi_udelay(int us)
{
	if (us < 1000) {
		udelay(us);
		cond_resched();
	} else {
		msleep((us + 999) / 1000);
	}
}
359 | ||
/* Acquire a chip lock, disabling bottom halves while held.
 * NOTE(review): the _bh variant suggests the lock is also taken from
 * softirq/timer context — confirm against the cmdset drivers. */
static inline void cfi_spin_lock(spinlock_t *mutex)
{
	spin_lock_bh(mutex);
}
364 | ||
/* Release a chip lock taken with cfi_spin_lock(), re-enabling bottom halves. */
static inline void cfi_spin_unlock(spinlock_t *mutex)
{
	spin_unlock_bh(mutex);
}
369 | ||
/* Read 'size' bytes of extended query table from flash offset 'adr';
 * returns a freshly allocated structure (caller owns it) or presumably
 * NULL on failure — confirm in the implementation. */
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
		const char* name);

/* One chip-quirk entry: applied when (mfr, id) match the probed chip.
 * CFI_MFR_ANY / CFI_ID_ANY act as wildcards. */
struct cfi_fixup {
	uint16_t mfr;		/* JEDEC manufacturer ID to match */
	uint16_t id;		/* JEDEC device ID to match */
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;		/* opaque argument passed to fixup() */
};

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff

#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ST  0x0020 	/* STMicroelectronics */

/* Walk a fixup table, invoking each matching entry against 'mtd'. */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

/* Callback applied per erase-region span by cfi_varsize_frob(). */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

/* Apply 'frob' across [ofs, ofs+len), honouring variable block sizes. */
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);
392 | ||
393 | ||
394 | #endif /* __MTD_CFI_H__ */ |