/*
 * Common Flash Interface support:
 * Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}
EXPORT_SYMBOL(cfi_udelay);

/*
 * Returns the command address according to the given geometry.
 */
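/*
 * Worked example with illustrative (assumed) geometry: for a single x16
 * device used in 8-bit compatibility mode (device_type = 2, interleave = 1,
 * bankwidth = 1), the unlock offset 0x2AA first scales to 0x2AA * 2 = 0x554;
 * since (type * interleave) > bankwidth and the low byte of the offset is
 * 0xAA, the low bit is then set, yielding the byte-mode address 0x555.
 * The offset 0x555, whose low byte is 0x55, simply scales to 0xAAA.
 */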
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
			    struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16-bit devices on 8-bit buses and 32-bit devices on 16-bit
	 * buses, set the low bit of the alternating bit sequence of the
	 * address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1)*interleave;

	return addr;
}
EXPORT_SYMBOL(cfi_build_cmd_addr);

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
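/*
 * Worked example with illustrative (assumed) geometry: with two x16 chips
 * interleaved on a 32-bit bus (bankwidth = 4, interleave = 2,
 * device_type = 2), chip_mode is 2 and chips_per_word is 2.  Assuming no
 * CFI byte swapping, a command byte such as 0x98 is first widened to the
 * 16-bit value 0x0098 and then replicated once, so the returned map_word
 * holds 0x00980098 and a single bus write addresses both chips.
 */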
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; /* i.e. normally 1 */
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(map, cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(map, cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
EXPORT_SYMBOL(cfi_build_cmd);

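/*
 * Collapse a map_word read back from several interleaved chips into a
 * single per-chip status value: the lanes of all chips on the bus are
 * OR-ed together (undoing the replication done by cfi_build_cmd()) and
 * the result is converted back to CPU byte order.
 */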
unsigned long cfi_merge_status(map_word val, struct map_info *map,
			       struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; /* i.e. normally 1 */
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* OR all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, convert the merged status back to CPU byte order,
	   according to the chip mode... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}
EXPORT_SYMBOL(cfi_merge_status);

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it is set to the value read from the command
 * address just before the command was written.
 */
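/*
 * Typical usage (as in the QRY-entry sequences later in this file): the
 * JEDEC unlock cycle is issued as
 *	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
 * with the geometry scaling of the offsets handled by cfi_build_cmd_addr().
 */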
uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
			  struct map_info *map, struct cfi_private *cfi,
			  int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
EXPORT_SYMBOL(cfi_send_gen_cmd);

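/*
 * Check whether the chip at @base is currently in Query mode, i.e. whether
 * it returns the CFI identification string "QRY" at offsets 0x10-0x12
 * (scaled by interleave and device width).
 */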
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

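/*
 * Put the chip at @base into CFI Query mode.  The standard entry command
 * (0x98 at address 0x55) is tried first, followed by several fallbacks for
 * chips that need a reset or a vendor-specific address; returns 1 if the
 * "QRY" string became visible, 0 otherwise.
 */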
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found; probably we are dealing with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* SST 39VF640xB */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exiting QRY mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);

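/*
 * Read @size bytes of the vendor-specific (primary) extended query table
 * located at CFI offset @adr into a freshly kmalloc'd buffer, switching the
 * chip into and back out of Query mode around the reads.  Returns NULL if
 * @adr is zero or the allocation fails.
 */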
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char *name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp)
		goto out;

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);

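/*
 * Walk a table of fixups (terminated by an entry with a NULL fixup
 * callback) and invoke every entry whose manufacturer and device IDs match
 * the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.
 */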
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f=fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
			f->fixup(mtd);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);

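/*
 * Apply @frob to every erase block in the range [@ofs, @ofs + @len) of an
 * MTD whose erase regions may use different block sizes.  Both ends of the
 * range must be aligned to the erase size in effect at that address;
 * otherwise -EINVAL is returned.
 */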
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check that the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while(len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);

MODULE_LICENSE("GPL");