2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
8 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
9 * - completely revamped method functions so they are aware and
10 * independent of the flash geometry (buswidth, interleave, etc.)
11 * - scalability vs code size is completely set at compile-time
12 * (see include/linux/mtd/cfi.h for selection)
13 * - optimized write buffer method
14 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15 * - reworked lock/unlock/erase support for var size flash
16 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
17 * - auto unlock sectors on resume for auto locking flash on power up
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
25 #include <asm/byteorder.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/reboot.h>
32 #include <linux/bitmap.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
45 #define I82802AB 0x00ad
46 #define I82802AC 0x00ac
47 #define PF38F4476 0x881c
48 /* STMicroelectronics chips */
49 #define M50LPW080 0x002F
50 #define M50FLW080A 0x0080
51 #define M50FLW080B 0x0081
53 #define AT49BV640D 0x02de
54 #define AT49BV640DT 0x02db
/*
 * Forward declarations for the mtd_info operation callbacks and the
 * low-level chip access helpers implemented later in this file.
 */
56 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
59 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
60 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
61 static void cfi_intelext_sync (struct mtd_info *);
62 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
63 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
/* NOTE(review): the continuation line of this prototype (presumably a
 * trailing "uint64_t len);") is missing from this excerpt — confirm
 * against the full source. */
64 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
/* OTP / protection-register accessors. */
67 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
68 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
69 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
70 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
71 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
72 size_t *, struct otp_info *);
73 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
74 size_t *, struct otp_info *);
/* Power-management / reboot hooks. */
76 static int cfi_intelext_suspend (struct mtd_info *);
77 static void cfi_intelext_resume (struct mtd_info *);
78 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
80 static void cfi_intelext_destroy(struct mtd_info *);
/* Public entry point; aliased below as cfi_cmdset_0003/cfi_cmdset_0200. */
82 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
84 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
85 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
/* point/unpoint: direct-mapped access for linear maps. */
87 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
88 size_t *retlen, void **virt, resource_size_t *phys);
89 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
/* Chip state-machine helpers (see CHIP ACCESS FUNCTIONS section). */
91 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
92 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
99 * *********** SETUP AND PROBE BITS ***********
/* Chip-driver registration record handed to the map layer; probe is NULL
 * because this command set is entered via cfi_cmdset_0001(), not probed
 * directly. */
102 static struct mtd_chip_driver cfi_intelext_chipdrv = {
103 .probe = NULL, /* Not usable directly */
104 .destroy = cfi_intelext_destroy,
105 .name = "cfi_cmdset_0001",
106 .module = THIS_MODULE
/* NOTE(review): the closing "};" of this initializer is missing from
 * this excerpt. */
109 /* #define DEBUG_LOCK_BITS */
110 /* #define DEBUG_CFI_FEATURES */
112 #ifdef DEBUG_CFI_FEATURES
/*
 * Debug aid: print every Extended Query feature bit of the Intel/Sharp
 * PRI structure in human-readable form. Compiled only when
 * DEBUG_CFI_FEATURES is defined above.
 * NOTE(review): the opening brace, the declaration of loop index `i`,
 * several closing braces and the #endif are elided in this excerpt.
 */
113 static void cfi_tell_features(struct cfi_pri_intelext *extp)
116 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
117 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
118 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
119 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
120 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
121 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
122 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
123 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
124 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
125 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
126 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
127 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
128 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
/* Bits 11..31 have no assigned meaning here; report any that are set. */
129 for (i=11; i<32; i++) {
130 if (extp->FeatureSupport & (1<<i))
131 printk(" - Unknown Bit %X: supported\n", i);
134 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
135 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
136 for (i=1; i<8; i++) {
137 if (extp->SuspendCmdSupport & (1<<i))
138 printk(" - Unknown Bit %X: supported\n", i);
141 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
142 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
143 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
144 for (i=2; i<3; i++) {
145 if (extp->BlkStatusRegMask & (1<<i))
146 printk(" - Unknown Bit %X Active: yes\n",i);
148 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
149 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
150 for (i=6; i<16; i++) {
151 if (extp->BlkStatusRegMask & (1<<i))
152 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages are encoded as BCD-style nibbles: high nibble = volts,
 * low nibble = tenths. */
155 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
156 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
157 if (extp->VppOptimal)
158 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
159 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
163 /* Atmel chips don't use the same PRI format as Intel chips */
/*
 * Translate the Atmel PRI layout into the Intel cfi_pri_intelext layout
 * that the rest of this driver expects, rebuilding FeatureSupport from
 * the Atmel feature byte and disabling buffered writes.
 */
164 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
166 struct map_info *map = mtd->priv;
167 struct cfi_private *cfi = map->fldrv_priv;
168 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
169 struct cfi_pri_atmel atmel_pri;
170 uint32_t features = 0;
/* Undo the Intel-style byteswap done by the generic PRI read so the raw
 * Atmel bytes can be reinterpreted below. */
172 /* Reverse byteswapping */
173 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
174 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
175 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
/* Snapshot the Atmel fields, then clear everything past the 5-byte
 * PRI header before filling in Intel-format values. */
177 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
178 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
180 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
/* NOTE(review): the "features |= ..." statement that should follow each
 * of the feature tests below appears to be elided in this excerpt. */
182 if (atmel_pri.Features & 0x01) /* chip erase supported */
184 if (atmel_pri.Features & 0x02) /* erase suspend supported */
186 if (atmel_pri.Features & 0x04) /* program suspend supported */
188 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
190 if (atmel_pri.Features & 0x20) /* page mode read supported */
192 if (atmel_pri.Features & 0x40) /* queued erase supported */
194 if (atmel_pri.Features & 0x80) /* Protection bits supported */
197 extp->FeatureSupport = features;
199 /* burst write mode not supported */
200 cfi->cfiq->BufWriteTimeoutTyp = 0;
201 cfi->cfiq->BufWriteTimeoutMax = 0;
/*
 * AT49BV640D/DT: advertise the instant-block-lock feature (bit 5) and
 * mark the device as powering up with its sectors locked so they get
 * auto-unlocked on resume.
 */
204 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
206 struct map_info *map = mtd->priv;
207 struct cfi_private *cfi = map->fldrv_priv;
208 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
210 cfip->FeatureSupport |= (1 << 5);
211 mtd->flags |= MTD_POWERUP_LOCK;
214 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
215 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
/* Compile-time workaround: clear SuspendCmdSupport bit 0 so the driver
 * never programs while an erase is suspended on affected parts. */
216 static void fixup_intel_strataflash(struct mtd_info *mtd)
218 struct map_info *map = mtd->priv;
219 struct cfi_private *cfi = map->fldrv_priv;
220 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
222 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
223 "erase on write disabled.\n");
224 extp->SuspendCmdSupport &= ~1;
228 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Compile-time workaround: clear FeatureSupport bit 2 (program suspend)
 * so writes are never suspended. */
229 static void fixup_no_write_suspend(struct mtd_info *mtd)
231 struct map_info *map = mtd->priv;
232 struct cfi_private *cfi = map->fldrv_priv;
233 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
235 if (cfip && (cfip->FeatureSupport&4)) {
236 cfip->FeatureSupport &= ~4;
237 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
/* ST M28W320CT: zero the buffer-write timeouts so the buffered write
 * path is never selected (chip does not support it). */
242 static void fixup_st_m28w320ct(struct mtd_info *mtd)
244 struct map_info *map = mtd->priv;
245 struct cfi_private *cfi = map->fldrv_priv;
247 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
248 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
/* ST M28W320CB: correct the block count in the second erase-region
 * descriptor (low 16 bits of EraseRegionInfo[1] forced to 0x3e). */
251 static void fixup_st_m28w320cb(struct mtd_info *mtd)
253 struct map_info *map = mtd->priv;
254 struct cfi_private *cfi = map->fldrv_priv;
256 /* Note this is done after the region info is endian swapped */
257 cfi->cfiq->EraseRegionInfo[1] =
258 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
/* Install the point/unpoint (direct mapped access) methods, but only
 * for linear maps and only if no map driver already provided one. */
261 static void fixup_use_point(struct mtd_info *mtd)
263 struct map_info *map = mtd->priv;
264 if (!mtd->_point && map_is_linear(map)) {
265 mtd->_point = cfi_intelext_point;
266 mtd->_unpoint = cfi_intelext_unpoint;
/* If the chip advertises a buffer-write timeout, switch the mtd write
 * methods over to the (faster) buffered variants. */
270 static void fixup_use_write_buffers(struct mtd_info *mtd)
272 struct map_info *map = mtd->priv;
273 struct cfi_private *cfi = map->fldrv_priv;
274 if (cfi->cfiq->BufWriteTimeoutTyp) {
275 printk(KERN_INFO "Using buffer write method\n" );
276 mtd->_write = cfi_intelext_write_buffers;
277 mtd->_writev = cfi_intelext_writev;
282 * Some chips power-up with all sectors locked by default.
/* If instant block locking (FeatureSupport bit 5) is advertised, flag
 * the device so sectors are auto-unlocked on power-up/resume. */
284 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
286 struct map_info *map = mtd->priv;
287 struct cfi_private *cfi = map->fldrv_priv;
288 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
290 if (cfip->FeatureSupport&32) {
291 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
292 mtd->flags |= MTD_POWERUP_LOCK;
/* Per-manufacturer/device fixups applied to chips probed in CFI mode.
 * NOTE(review): the terminating { 0, 0, NULL } sentinel and "};" are
 * elided in this excerpt, as are some #endif lines. */
296 static struct cfi_fixup cfi_fixup_table[] = {
297 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
298 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
299 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
300 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
301 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
303 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
304 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
306 #if !FORCE_WORD_WRITE
307 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
/* Fixups applied only to chips probed in JEDEC mode: all of these are
 * firmware-hub parts that need the FWH lock scheme.
 * NOTE(review): sentinel entry and closing "};" elided in this excerpt. */
315 static struct cfi_fixup jedec_fixup_table[] = {
316 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
317 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
318 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
319 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
320 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
/* Fixups applied regardless of probe mode (CFI or JEDEC).
 * NOTE(review): sentinel entry and closing "};" elided in this excerpt. */
323 static struct cfi_fixup fixup_table[] = {
324 /* The CFI vendor ids and the JEDEC vendor IDs appear
325 * to be common. It is like the devices id's are as
326 * well. This table is to pick all cases where
327 * we know that is the case.
329 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
/* PF38F4476 reports Extended Query minor version '3' but only actually
 * implements the 1.1-era layout; downgrade it before parsing. */
333 static void cfi_fixup_major_minor(struct cfi_private *cfi,
334 struct cfi_pri_intelext *extp)
336 if (cfi->mfr == CFI_MFR_INTEL &&
337 cfi->id == PF38F4476 && extp->MinorVersion == '3')
338 extp->MinorVersion = '1';
/*
 * Read and validate the Intel/Sharp Extended Query (PRI) structure at
 * address `adr`, byteswapping the multi-byte fields and walking the
 * version-dependent variable-length tail (protection register info,
 * burst read info, hardware partition regions) to compute how large the
 * structure really is; re-read with the full size if needed.
 * NOTE(review): many lines (error returns, "goto need_more" re-read
 * logic, local declarations of i/nb_parts, closing braces) are elided
 * in this excerpt — the comments below describe only what is visible.
 */
341 static inline struct cfi_pri_intelext *
342 read_pri_intelext(struct map_info *map, __u16 adr)
344 struct cfi_private *cfi = map->fldrv_priv;
345 struct cfi_pri_intelext *extp;
346 unsigned int extra_size = 0;
347 unsigned int extp_size = sizeof(*extp);
350 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
/* Apply chip-specific version quirks before validating. */
354 cfi_fixup_major_minor(cfi, extp);
/* Only Extended Query versions 1.0 .. 1.5 are understood. */
356 if (extp->MajorVersion != '1' ||
357 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
358 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
359 "version %c.%c.\n", extp->MajorVersion,
365 /* Do some byteswapping if necessary */
366 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
367 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
368 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
370 if (extp->MinorVersion >= '0') {
373 /* Protection Register info */
374 extra_size += (extp->NumProtectionFields - 1) *
375 sizeof(struct cfi_intelext_otpinfo);
378 if (extp->MinorVersion >= '1') {
379 /* Burst Read info */
381 if (extp_size < sizeof(*extp) + extra_size)
/* Last byte of the burst read block encodes its additional length. */
383 extra_size += extp->extra[extra_size - 1];
386 if (extp->MinorVersion >= '3') {
389 /* Number of hardware-partitions */
391 if (extp_size < sizeof(*extp) + extra_size)
393 nb_parts = extp->extra[extra_size - 1];
395 /* skip the sizeof(partregion) field in CFI 1.4 */
396 if (extp->MinorVersion >= '4')
/* Walk each partition region descriptor, accounting for its
 * variable number of block-type entries. */
399 for (i = 0; i < nb_parts; i++) {
400 struct cfi_intelext_regioninfo *rinfo;
401 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
402 extra_size += sizeof(*rinfo);
403 if (extp_size < sizeof(*extp) + extra_size)
405 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
406 extra_size += (rinfo->NumBlockTypes - 1)
407 * sizeof(struct cfi_intelext_blockinfo);
410 if (extp->MinorVersion >= '4')
411 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
/* The first read was too small for the computed tail: grow and,
 * presumably, re-read (re-read logic elided in this excerpt). */
413 if (extp_size < sizeof(*extp) + extra_size) {
415 extp_size = sizeof(*extp) + extra_size;
/* Sanity cap to avoid runaway sizes from corrupt query data. */
417 if (extp_size > 4096) {
419 "%s: cfi_pri_intelext is too fat\n",
/*
 * Entry point for maps using the Intel Extended command set (0x0001):
 * allocate and populate the mtd_info, read the extended query in CFI
 * mode, apply fixup tables, derive per-chip timing parameters from the
 * CFI query data, and finish via cfi_intelext_setup().
 * @map:     the memory map the chips live behind
 * @primary: non-zero to use the primary query address, else alternate
 * Returns the mtd_info on success (via cfi_intelext_setup), NULL-paths
 * elided in this excerpt.
 * NOTE(review): error-handling lines, #endif lines, the declaration of
 * `i` and several closing braces are missing from this excerpt.
 */
430 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
432 struct cfi_private *cfi = map->fldrv_priv;
433 struct mtd_info *mtd;
436 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
440 mtd->type = MTD_NORFLASH;
442 /* Fill in the default mtd operations */
443 mtd->_erase = cfi_intelext_erase_varsize;
444 mtd->_read = cfi_intelext_read;
445 mtd->_write = cfi_intelext_write_words;
446 mtd->_sync = cfi_intelext_sync;
447 mtd->_lock = cfi_intelext_lock;
448 mtd->_unlock = cfi_intelext_unlock;
449 mtd->_is_locked = cfi_intelext_is_locked;
450 mtd->_suspend = cfi_intelext_suspend;
451 mtd->_resume = cfi_intelext_resume;
452 mtd->flags = MTD_CAP_NORFLASH;
453 mtd->name = map->name;
455 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
457 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
459 if (cfi->cfi_mode == CFI_MODE_CFI) {
461 * It's a real CFI chip, not one for which the probe
462 * routine faked a CFI structure. So we read the feature
465 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
466 struct cfi_pri_intelext *extp;
468 extp = read_pri_intelext(map, adr);
474 /* Install our own private info structure */
475 cfi->cmdset_priv = extp;
477 cfi_fixup(mtd, cfi_fixup_table);
479 #ifdef DEBUG_CFI_FEATURES
480 /* Tell the user about it in lots of lovely detail */
481 cfi_tell_features(extp);
484 if(extp->SuspendCmdSupport & 1) {
485 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
488 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
489 /* Apply jedec specific fixups */
490 cfi_fixup(mtd, jedec_fixup_table);
492 /* Apply generic fixups */
493 cfi_fixup(mtd, fixup_table);
/* Derive typical and worst-case operation times (in usecs) from the
 * CFI query's power-of-two timeout exponents; fall back to generous
 * defaults when a typical value is not specified. */
495 for (i=0; i< cfi->numchips; i++) {
496 if (cfi->cfiq->WordWriteTimeoutTyp)
497 cfi->chips[i].word_write_time =
498 1<<cfi->cfiq->WordWriteTimeoutTyp;
500 cfi->chips[i].word_write_time = 50000;
502 if (cfi->cfiq->BufWriteTimeoutTyp)
503 cfi->chips[i].buffer_write_time =
504 1<<cfi->cfiq->BufWriteTimeoutTyp;
505 /* No default; if it isn't specified, we won't use it */
507 if (cfi->cfiq->BlockEraseTimeoutTyp)
508 cfi->chips[i].erase_time =
509 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
511 cfi->chips[i].erase_time = 2000000;
/* Max time = typical exponent + max multiplier exponent. */
513 if (cfi->cfiq->WordWriteTimeoutTyp &&
514 cfi->cfiq->WordWriteTimeoutMax)
515 cfi->chips[i].word_write_time_max =
516 1<<(cfi->cfiq->WordWriteTimeoutTyp +
517 cfi->cfiq->WordWriteTimeoutMax);
519 cfi->chips[i].word_write_time_max = 50000 * 8;
521 if (cfi->cfiq->BufWriteTimeoutTyp &&
522 cfi->cfiq->BufWriteTimeoutMax)
523 cfi->chips[i].buffer_write_time_max =
524 1<<(cfi->cfiq->BufWriteTimeoutTyp +
525 cfi->cfiq->BufWriteTimeoutMax);
527 if (cfi->cfiq->BlockEraseTimeoutTyp &&
528 cfi->cfiq->BlockEraseTimeoutMax)
529 cfi->chips[i].erase_time_max =
530 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
531 cfi->cfiq->BlockEraseTimeoutMax);
533 cfi->chips[i].erase_time_max = 2000000 * 8;
535 cfi->chips[i].ref_point_counter = 0;
536 init_waitqueue_head(&(cfi->chips[i].wq));
539 map->fldrv = &cfi_intelext_chipdrv;
541 return cfi_intelext_setup(mtd);
/* Command sets 0x0003 and 0x0200 are handled identically to 0x0001, so
 * they are plain aliases of the same entry point. */
543 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
544 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
545 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
546 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
547 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
/*
 * Second-stage setup: build the erase-region table from the CFI query
 * data (scaled by interleave, replicated per chip), install the OTP
 * methods, run the hardware-partition fixup, and register the reboot
 * notifier. Returns the finished mtd_info, or NULL on the (elided)
 * error paths which free the partial allocations.
 * NOTE(review): declarations of i/j, error labels/returns and several
 * closing braces are missing from this excerpt.
 */
549 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
551 struct map_info *map = mtd->priv;
552 struct cfi_private *cfi = map->fldrv_priv;
553 unsigned long offset = 0;
555 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
557 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
559 mtd->size = devsize * cfi->numchips;
561 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
562 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
563 * mtd->numeraseregions, GFP_KERNEL);
564 if (!mtd->eraseregions)
567 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
568 unsigned long ernum, ersize;
/* EraseRegionInfo encoding: high 16 bits = block size / 256,
 * low 16 bits = number of blocks - 1. */
569 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
570 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize ends up as the largest region's block size. */
572 if (mtd->erasesize < ersize) {
573 mtd->erasesize = ersize;
/* Replicate this region's entry once per physical chip, offset by
 * each chip's base. lockmap is one bit per block. */
575 for (j=0; j<cfi->numchips; j++) {
576 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
577 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
578 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
579 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
581 offset += (ersize * ernum);
/* Sanity check: the regions must exactly tile one device. */
584 if (offset != devsize) {
586 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
590 for (i=0; i<mtd->numeraseregions;i++){
591 printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
592 i,(unsigned long long)mtd->eraseregions[i].offset,
593 mtd->eraseregions[i].erasesize,
594 mtd->eraseregions[i].numblocks);
597 #ifdef CONFIG_MTD_OTP
598 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
599 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
600 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
601 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
602 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
603 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
606 /* This function has the potential to distort the reality
607 a bit and therefore should be called last. */
608 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
611 __module_get(THIS_MODULE);
612 register_reboot_notifier(&mtd->reboot_notifier);
/* Error unwinding (labels elided): free regions and cmdset_priv. */
616 kfree(mtd->eraseregions);
618 kfree(cfi->cmdset_priv);
/*
 * Multi-partition probing: when the chip advertises simultaneous
 * operations (FeatureSupport bit 9, CFI >= 1.3), replace the
 * cfi_private with a new one that has a separate flchip per hardware
 * partition, all partitions of one physical chip sharing a
 * flchip_shared arbitration structure. *pcfi is updated to the new
 * structure. Returns 0 on success (non-zero error paths elided).
 * NOTE(review): various lines (offs adjustments, allocation-failure
 * returns, closing braces, chip++ increment) are missing from this
 * excerpt; comments below cover only visible code.
 */
622 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
623 struct cfi_private **pcfi)
625 struct map_info *map = mtd->priv;
626 struct cfi_private *cfi = *pcfi;
627 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
630 * Probing of multi-partition flash chips.
632 * To support multiple partitions when available, we simply arrange
633 * for each of them to have their own flchip structure even if they
634 * are on the same physical chip. This means completely recreating
635 * a new cfi_private structure right here which is a blatent code
636 * layering violation, but this is still the least intrusive
637 * arrangement at this point. This can be rearranged in the future
638 * if someone feels motivated enough. --nico
640 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
641 && extp->FeatureSupport & (1 << 9)) {
642 struct cfi_private *newcfi;
644 struct flchip_shared *shared;
645 int offs, numregions, numparts, partshift, numvirtchips, i, j;
/* Walk the variable-length extra[] tail exactly as
 * read_pri_intelext() did, to find the partition region data. */
647 /* Protection Register info */
648 offs = (extp->NumProtectionFields - 1) *
649 sizeof(struct cfi_intelext_otpinfo);
651 /* Burst Read info */
652 offs += extp->extra[offs+1]+2;
654 /* Number of partition regions */
655 numregions = extp->extra[offs];
658 /* skip the sizeof(partregion) field in CFI 1.4 */
659 if (extp->MinorVersion >= '4')
662 /* Number of hardware partitions */
664 for (i = 0; i < numregions; i++) {
665 struct cfi_intelext_regioninfo *rinfo;
666 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
667 numparts += rinfo->NumIdentPartitions;
668 offs += sizeof(*rinfo)
669 + (rinfo->NumBlockTypes - 1) *
670 sizeof(struct cfi_intelext_blockinfo);
676 /* Programming Region info */
677 if (extp->MinorVersion >= '4') {
678 struct cfi_intelext_programming_regioninfo *prinfo;
679 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
680 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
681 mtd->flags &= ~MTD_BIT_WRITEABLE;
682 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
683 map->name, mtd->writesize,
684 cfi->interleave * prinfo->ControlValid,
685 cfi->interleave * prinfo->ControlInvalid);
689 * All functions below currently rely on all chips having
690 * the same geometry so we'll just assume that all hardware
691 * partitions are of the same size too.
693 partshift = cfi->chipshift - __ffs(numparts);
/* A partition smaller than the erase block size is nonsensical. */
695 if ((1 << partshift) < mtd->erasesize) {
697 "%s: bad number of hw partitions (%d)\n",
702 numvirtchips = cfi->numchips * numparts;
703 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
706 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
711 memcpy(newcfi, cfi, sizeof(struct cfi_private));
712 newcfi->numchips = numvirtchips;
713 newcfi->chipshift = partshift;
/* Clone each physical chip's flchip once per partition; all clones
 * of one physical chip point at the same flchip_shared. */
715 chip = &newcfi->chips[0];
716 for (i = 0; i < cfi->numchips; i++) {
717 shared[i].writing = shared[i].erasing = NULL;
718 mutex_init(&shared[i].lock);
719 for (j = 0; j < numparts; j++) {
720 *chip = cfi->chips[i];
721 chip->start += j << partshift;
722 chip->priv = &shared[i];
723 /* those should be reset too since
724 they create memory references. */
725 init_waitqueue_head(&chip->wq);
726 mutex_init(&chip->mutex);
731 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
732 "--> %d partitions of %d KiB\n",
733 map->name, cfi->numchips, cfi->interleave,
734 newcfi->numchips, 1<<(newcfi->chipshift-10));
736 map->fldrv_priv = newcfi;
745 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * Wait until the chip is ready for the requested operation `mode`,
 * suspending an in-progress erase when the feature bits allow it.
 * Called with chip->mutex held; may drop and retake it while sleeping.
 * Status polling uses Read Status Register semantics: 0x80 = WSM ready,
 * 0x01 = partition write status.
 * NOTE(review): several case labels, return statements, goto targets
 * and closing braces of the state machine are elided in this excerpt.
 */
747 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
749 DECLARE_WAITQUEUE(wait, current);
750 struct cfi_private *cfi = map->fldrv_priv;
751 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
752 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
753 unsigned long timeo = jiffies + HZ;
755 /* Prevent setting state FL_SYNCING for chip in suspended state. */
756 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
759 switch (chip->state) {
763 status = map_read(map, adr);
764 if (map_word_andequal(map, status, status_OK, status_OK))
767 /* At this point we're fine with write operations
768 in other partitions as they don't conflict. */
769 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
/* Drop the lock while busy-waiting so others can make progress. */
772 mutex_unlock(&chip->mutex);
774 mutex_lock(&chip->mutex);
775 /* Someone else might have been playing with it. */
/* Erase suspend is only attempted if the chip supports it (feature
 * bit 1) and the requester wants read/point, or write when
 * program-after-erase-suspend is supported. */
786 !(cfip->FeatureSupport & 2) ||
787 !(mode == FL_READY || mode == FL_POINT ||
788 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
/* 0xB0 = Erase Suspend command. */
793 map_write(map, CMD(0xB0), adr);
795 /* If the flash has finished erasing, then 'erase suspend'
796 * appears to make some (28F320) flash devices switch to
797 * 'read' mode. Make sure that we switch to 'read status'
798 * mode so we get the right data. --rmk
800 map_write(map, CMD(0x70), adr);
801 chip->oldstate = FL_ERASING;
802 chip->state = FL_ERASE_SUSPENDING;
803 chip->erase_suspended = 1;
805 status = map_read(map, adr);
806 if (map_word_andequal(map, status, status_OK, status_OK))
809 if (time_after(jiffies, timeo)) {
810 /* Urgh. Resume and pretend we weren't here.
811 * Make sure we're in 'read status' mode if it had finished */
812 put_chip(map, chip, adr);
813 printk(KERN_ERR "%s: Chip not ready after erase "
814 "suspended: status = 0x%lx\n", map->name, status.x[0]);
818 mutex_unlock(&chip->mutex);
820 mutex_lock(&chip->mutex);
821 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
822 So we can just loop here. */
824 chip->state = FL_STATUS;
827 case FL_XIP_WHILE_ERASING:
828 if (mode != FL_READY && mode != FL_POINT &&
829 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
831 chip->oldstate = chip->state;
832 chip->state = FL_READY;
836 /* The machine is rebooting now,so no one can get chip anymore */
839 /* Only if there's no operation suspended... */
840 if (mode == FL_READY && chip->oldstate == FL_READY)
/* Default: sleep on the chip's wait queue until woken, then retry
 * (retry jump elided in this excerpt). */
845 set_current_state(TASK_UNINTERRUPTIBLE);
846 add_wait_queue(&chip->wq, &wait);
847 mutex_unlock(&chip->mutex);
849 remove_wait_queue(&chip->wq, &wait);
850 mutex_lock(&chip->mutex);
/*
 * Acquire the chip for operation `mode`, arbitrating write/erase
 * ownership between virtual partitions of the same physical chip via
 * the shared flchip_shared structure, then delegate to chip_ready().
 * Called with chip->mutex held.
 * NOTE(review): the leading condition of the big `if`, some retry
 * gotos, return statements and closing braces are elided in this
 * excerpt.
 */
855 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
858 DECLARE_WAITQUEUE(wait, current);
862 (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
863 || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
865 * OK. We have possibility for contention on the write/erase
866 * operations which are global to the real chip and not per
867 * partition. So let's fight it over in the partition which
868 * currently has authority on the operation.
870 * The rules are as follows:
872 * - any write operation must own shared->writing.
874 * - any erase operation must own _both_ shared->writing and
877 * - contention arbitration is handled in the owner's context.
879 * The 'shared' struct can be read and/or written only when
882 struct flchip_shared *shared = chip->priv;
883 struct flchip *contender;
884 mutex_lock(&shared->lock);
885 contender = shared->writing;
886 if (contender && contender != chip) {
888 * The engine to perform desired operation on this
889 * partition is already in use by someone else.
890 * Let's fight over it in the context of the chip
891 * currently using it. If it is possible to suspend,
892 * that other partition will do just that, otherwise
893 * it'll happily send us to sleep. In any case, when
894 * get_chip returns success we're clear to go ahead.
/* trylock avoids lock-order inversion against the contender. */
896 ret = mutex_trylock(&contender->mutex);
897 mutex_unlock(&shared->lock);
900 mutex_unlock(&chip->mutex);
901 ret = chip_ready(map, contender, contender->start, mode);
902 mutex_lock(&chip->mutex);
904 if (ret == -EAGAIN) {
905 mutex_unlock(&contender->mutex);
909 mutex_unlock(&contender->mutex);
/* Re-validate our own state: it may have changed while
 * chip->mutex was dropped above. */
912 mutex_lock(&shared->lock);
914 /* We should not own chip if it is already
915 * in FL_SYNCING state. Put contender and retry. */
916 if (chip->state == FL_SYNCING) {
917 put_chip(map, contender, contender->start);
918 mutex_unlock(&contender->mutex);
921 mutex_unlock(&contender->mutex);
924 /* Check if we already have suspended erase
925 * on this chip. Sleep. */
926 if (mode == FL_ERASING && shared->erasing
927 && shared->erasing->oldstate == FL_ERASING) {
928 mutex_unlock(&shared->lock);
929 set_current_state(TASK_UNINTERRUPTIBLE);
930 add_wait_queue(&chip->wq, &wait);
931 mutex_unlock(&chip->mutex);
933 remove_wait_queue(&chip->wq, &wait);
934 mutex_lock(&chip->mutex);
/* Claim ownership: writes own shared->writing; erases own both. */
939 shared->writing = chip;
940 if (mode == FL_ERASING)
941 shared->erasing = chip;
942 mutex_unlock(&shared->lock);
944 ret = chip_ready(map, chip, adr, mode);
/*
 * Release the chip after an operation: hand write ownership back to a
 * suspended-erase owner if there is one, then resume any suspended
 * erase (0xD0 = Erase Resume, 0x70 = Read Status) and wake sleepers.
 * Called with chip->mutex held.
 * NOTE(review): the partition-ownership guard condition, some case
 * labels, wake_up call and closing braces are elided in this excerpt.
 */
951 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
953 struct cfi_private *cfi = map->fldrv_priv;
956 struct flchip_shared *shared = chip->priv;
957 mutex_lock(&shared->lock);
958 if (shared->writing == chip && chip->oldstate == FL_READY) {
959 /* We own the ability to write, but we're done */
960 shared->writing = shared->erasing;
961 if (shared->writing && shared->writing != chip) {
962 /* give back ownership to who we loaned it from */
963 struct flchip *loaner = shared->writing;
964 mutex_lock(&loaner->mutex);
965 mutex_unlock(&shared->lock);
966 mutex_unlock(&chip->mutex);
/* Recursive put on the loaner resumes its suspended op. */
967 put_chip(map, loaner, loaner->start);
968 mutex_lock(&chip->mutex);
969 mutex_unlock(&loaner->mutex);
973 shared->erasing = NULL;
974 shared->writing = NULL;
975 } else if (shared->erasing == chip && shared->writing != chip) {
977 * We own the ability to erase without the ability
978 * to write, which means the erase was suspended
979 * and some other partition is currently writing.
980 * Don't let the switch below mess things up since
981 * we don't have ownership to resume anything.
983 mutex_unlock(&shared->lock);
987 mutex_unlock(&shared->lock);
990 switch(chip->oldstate) {
992 /* What if one interleaved chip has finished and the
993 other hasn't? The old code would leave the finished
994 one in READY mode. That's bad, and caused -EROFS
995 errors to be returned from do_erase_oneblock because
996 that's the only bit it checked for at the time.
997 As the state machine appears to explicitly allow
998 sending the 0x70 (Read Status) command to an erasing
999 chip and expecting it to be ignored, that's what we
1001 map_write(map, CMD(0xd0), adr);
1002 map_write(map, CMD(0x70), adr);
1003 chip->oldstate = FL_READY;
1004 chip->state = FL_ERASING;
1007 case FL_XIP_WHILE_ERASING:
1008 chip->state = chip->oldstate;
1009 chip->oldstate = FL_READY;
1014 case FL_JEDEC_QUERY:
1017 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1022 #ifdef CONFIG_MTD_XIP
1025 * No interrupt what so ever can be serviced while the flash isn't in array
1026 * mode. This is ensured by the xip_disable() and xip_enable() functions
1027 * enclosing any code path where the flash is known not to be in array mode.
1028 * And within a XIP disabled code path, only functions marked with __xipram
1029 * may be called and nothing else (it's a good thing to inspect generated
1030 * assembly to make sure inline functions were actually inlined and that gcc
1031 * didn't emit calls to its own support functions). Also configuring MTD CFI
1032 * support to a single buswidth and a single interleave is also recommended.
/* Mask interrupts before taking the flash out of array mode; the read
 * first faults in the MMU mapping so no access occurs while disabled.
 * NOTE(review): the second parameter line of the prototype appears to
 * be elided in this excerpt. */
1035 static void xip_disable(struct map_info *map, struct flchip *chip,
1038 /* TODO: chips with no XIP use should ignore and return */
1039 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
1040 local_irq_disable();
/* Return the flash to array mode (0xFF = Read Array) if needed and
 * re-enable interrupts (the local_irq_enable appears to be elided in
 * this excerpt, along with the closing brace). */
1043 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1046 struct cfi_private *cfi = map->fldrv_priv;
1047 if (chip->state != FL_POINT && chip->state != FL_READY) {
1048 map_write(map, CMD(0xff), adr);
1049 chip->state = FL_READY;
1051 (void) map_read(map, adr);
1057 * When a delay is required for the flash operation to complete, the
1058 * xip_wait_for_operation() function is polling for both the given timeout
1059 * and pending (but still masked) hardware interrupts. Whenever there is an
1060 * interrupt pending then the flash erase or write operation is suspended,
1061 * array mode restored and interrupts unmasked. Task scheduling might also
1062 * happen at that point. The CPU eventually returns from the interrupt or
1063 * the call to schedule() and the suspended flash operation is resumed for
1064 * the remainder of the delay period.
1066 * Warning: this function _will_ fool interrupt latency tracing tools.
/*
 * Poll an in-progress erase/write for completion (SR.7 == 1, i.e. CMD(0x80))
 * while keeping interrupt latency bounded: if an IRQ becomes pending and the
 * chip supports suspend (FeatureSupport bits 1/2), the operation is suspended
 * (0xb0), array mode restored and IRQs re-enabled so the system can run from
 * flash, then the operation is resumed (0xd0) afterwards.
 * Returns 0 on completion, -ETIME if 'chip_op_time_max' usecs elapse first.
 * NOTE(review): extract appears to drop lines (do/while heads, braces,
 * timeout accounting) - verify ordering against the full source.
 */
1069 static int __xipram xip_wait_for_operation(
1070 struct map_info *map, struct flchip *chip,
1071 unsigned long adr, unsigned int chip_op_time_max)
1073 struct cfi_private *cfi = map->fldrv_priv;
1074 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1075 map_word status, OK = CMD(0x80);
1076 unsigned long usec, suspended, start, done;
1077 flstate_t oldstate, newstate;
1079 start = xip_currtime();
1080 usec = chip_op_time_max;
1087 if (xip_irqpending() && cfip &&
1088 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1089 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1090 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1092 * Let's suspend the erase or write operation when
1093 * supported. Note that we currently don't try to
1094 * suspend interleaved chips if there is already
1095 * another operation suspended (imagine what happens
1096 * when one chip was already done with the current
1097 * operation while another chip suspended it, then
1098 * we resume the whole thing at once). Yes, it
/* 0xb0 = Program/Erase Suspend, 0x70 = Read Status Register */
1102 map_write(map, CMD(0xb0), adr);
1103 map_write(map, CMD(0x70), adr);
1104 suspended = xip_currtime();
1106 if (xip_elapsed_since(suspended) > 100000) {
1108 * The chip doesn't want to suspend
1109 * after waiting for 100 msecs.
1110 * This is a critical error but there
1111 * is not much we can do here.
1115 status = map_read(map, adr);
1116 } while (!map_word_andequal(map, status, OK, OK));
1118 /* Suspend succeeded */
1119 oldstate = chip->state;
1120 if (oldstate == FL_ERASING) {
/* SR.6 (0x40) set means erase actually suspended (not finished) */
1121 if (!map_word_bitsset(map, status, CMD(0x40)))
1123 newstate = FL_XIP_WHILE_ERASING;
1124 chip->erase_suspended = 1;
/* SR.2 (0x04) set means program actually suspended */
1126 if (!map_word_bitsset(map, status, CMD(0x04)))
1128 newstate = FL_XIP_WHILE_WRITING;
1129 chip->write_suspended = 1;
1131 chip->state = newstate;
1132 map_write(map, CMD(0xff), adr);
1133 (void) map_read(map, adr);
1136 mutex_unlock(&chip->mutex);
1141 * We're back. However someone else might have
1142 * decided to go write to the chip if we are in
1143 * a suspended erase state. If so let's wait
1146 mutex_lock(&chip->mutex);
1147 while (chip->state != newstate) {
1148 DECLARE_WAITQUEUE(wait, current);
1149 set_current_state(TASK_UNINTERRUPTIBLE);
1150 add_wait_queue(&chip->wq, &wait);
1151 mutex_unlock(&chip->mutex);
1153 remove_wait_queue(&chip->wq, &wait);
1154 mutex_lock(&chip->mutex);
1156 /* Disallow XIP again */
1157 local_irq_disable();
1159 /* Resume the write or erase operation */
1160 map_write(map, CMD(0xd0), adr);
1161 map_write(map, CMD(0x70), adr);
1162 chip->state = oldstate;
1163 start = xip_currtime();
1164 } else if (usec >= 1000000/HZ) {
1166 * Try to save on CPU power when waiting delay
1167 * is at least a system timer tick period.
1168 * No need to be extremely accurate here.
1172 status = map_read(map, adr);
1173 done = xip_elapsed_since(start);
1174 } while (!map_word_andequal(map, status, OK, OK)
1177 return (done >= usec) ? -ETIME : 0;
1181 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1182 * the flash is actively programming or erasing since we have to poll for
1183 * the operation to complete anyway. We can't do that in a generic way with
1184 * a XIP setup so do it before the actual flash operation in this case
1185 * and stub it out from INVAL_CACHE_AND_WAIT.
/*
 * XIP build: cache invalidation must happen BEFORE the flash operation
 * (see the comment above), so INVAL_CACHE_AND_WAIT reduces to the pure
 * wait and the invalidation parameters are ignored.
 */
1187 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1188 INVALIDATE_CACHED_RANGE(map, from, size)
1190 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1191 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
/*
 * Non-XIP build: the xip helpers compile away and waiting is done by
 * inval_cache_and_wait_for_operation(), which invalidates the cache
 * in parallel with polling.
 */
1195 #define xip_disable(map, chip, adr)
1196 #define xip_enable(map, chip, adr)
1197 #define XIP_INVAL_CACHED_RANGE(x...)
1198 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
/*
 * Non-XIP wait path: drop the chip mutex, invalidate the CPU cache over the
 * range being programmed/erased, then poll the status register (SR.7 ==
 * CMD(0x80)) until done or 'chip_op_time_max' usecs expire (-ETIME).
 * If another thread suspends the operation (chip->state changes) we sleep on
 * chip->wq; a suspend/resume resets the timeout budget since the chip
 * restarts its internal timer.
 * NOTE(review): extract appears to drop lines (loop heads, udelay branch,
 * timeout-exhausted check) - verify against the full source.
 */
1200 static int inval_cache_and_wait_for_operation(
1201 struct map_info *map, struct flchip *chip,
1202 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1203 unsigned int chip_op_time, unsigned int chip_op_time_max)
1205 struct cfi_private *cfi = map->fldrv_priv;
1206 map_word status, status_OK = CMD(0x80);
1207 int chip_state = chip->state;
1208 unsigned int timeo, sleep_time, reset_timeo;
1210 mutex_unlock(&chip->mutex);
1212 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1213 mutex_lock(&chip->mutex);
1215 timeo = chip_op_time_max;
1218 reset_timeo = timeo;
/* first sleep is half the typical operation time; then 1 tick at a time */
1219 sleep_time = chip_op_time / 2;
1222 if (chip->state != chip_state) {
1223 /* Someone's suspended the operation: sleep */
1224 DECLARE_WAITQUEUE(wait, current);
1225 set_current_state(TASK_UNINTERRUPTIBLE);
1226 add_wait_queue(&chip->wq, &wait);
1227 mutex_unlock(&chip->mutex);
1229 remove_wait_queue(&chip->wq, &wait);
1230 mutex_lock(&chip->mutex);
1234 status = map_read(map, cmd_adr);
1235 if (map_word_andequal(map, status, status_OK, status_OK))
1238 if (chip->erase_suspended && chip_state == FL_ERASING) {
1239 /* Erase suspend occurred while sleep: reset timeout */
1240 timeo = reset_timeo;
1241 chip->erase_suspended = 0;
1243 if (chip->write_suspended && chip_state == FL_WRITING) {
1244 /* Write suspend occurred while sleep: reset timeout */
1245 timeo = reset_timeo;
1246 chip->write_suspended = 0;
1249 map_write(map, CMD(0x70), cmd_adr);
1250 chip->state = FL_STATUS;
1254 /* OK Still waiting. Drop the lock, wait a while and retry. */
1255 mutex_unlock(&chip->mutex);
1256 if (sleep_time >= 1000000/HZ) {
1258 * Half of the normal delay still remaining
1259 * can be performed with a sleeping delay instead
1262 msleep(sleep_time/1000);
1263 timeo -= sleep_time;
1264 sleep_time = 1000000/HZ;
1270 mutex_lock(&chip->mutex);
1273 /* Done and happy. */
1274 chip->state = FL_STATUS;
/* Wait for status ready with no cache range to invalidate (inval len = 0). */
1280 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1281 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
/*
 * Put one chip into array mode for direct (memory-mapped) reads and pin it
 * there via ref_point_counter; balanced by cfi_intelext_unpoint().
 * NOTE(review): extract appears to drop lines (error path, 0xff write,
 * return) - verify against the full source.
 */
1284 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1286 unsigned long cmd_addr;
1287 struct cfi_private *cfi = map->fldrv_priv;
1292 /* Ensure cmd read/writes are aligned. */
1293 cmd_addr = adr & ~(map_bankwidth(map)-1);
1295 mutex_lock(&chip->mutex);
1297 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1300 if (chip->state != FL_POINT && chip->state != FL_READY)
1301 map_write(map, CMD(0xff), cmd_addr);
1303 chip->state = FL_POINT;
1304 chip->ref_point_counter++;
1306 mutex_unlock(&chip->mutex);
/*
 * mtd->_point implementation: map a [from, from+len) range for direct read
 * access, walking consecutive chips and locking each into FL_POINT.
 * Fails if the chips backing the range are not virtually contiguous.
 * NOTE(review): extract drops loop braces/retlen updates - verify against
 * the full source.
 */
1311 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1312 size_t *retlen, void **virt, resource_size_t *phys)
1314 struct map_info *map = mtd->priv;
1315 struct cfi_private *cfi = map->fldrv_priv;
1316 unsigned long ofs, last_end = 0;
1323 /* Now lock the chip(s) to POINT state */
1325 /* ofs: offset within the first chip that the first read should start */
1326 chipnum = (from >> cfi->chipshift);
1327 ofs = from - (chipnum << cfi->chipshift);
1329 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1331 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1334 unsigned long thislen;
1336 if (chipnum >= cfi->numchips)
1339 /* We cannot point across chips that are virtually disjoint */
1341 last_end = cfi->chips[chipnum].start;
1342 else if (cfi->chips[chipnum].start != last_end)
1345 if ((len + ofs -1) >> cfi->chipshift)
1346 thislen = (1<<cfi->chipshift) - ofs;
1350 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1358 last_end += 1 << cfi->chipshift;
/*
 * mtd->_unpoint implementation: drop the FL_POINT reference taken by
 * cfi_intelext_point() on each chip in the range; the last unpoint
 * returns a chip to FL_READY.
 */
1364 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1366 struct map_info *map = mtd->priv;
1367 struct cfi_private *cfi = map->fldrv_priv;
1369 int chipnum, err = 0;
1371 /* Now unlock the chip(s) POINT state */
1373 /* ofs: offset within the first chip that the first read should start */
1374 chipnum = (from >> cfi->chipshift);
1375 ofs = from - (chipnum << cfi->chipshift);
1377 while (len && !err) {
1378 unsigned long thislen;
1379 struct flchip *chip;
1381 chip = &cfi->chips[chipnum];
/* NOTE(review): chip is dereferenced-by-index above before this bounds
 * check; ordering looks suspect but lines may be missing here. */
1382 if (chipnum >= cfi->numchips)
1385 if ((len + ofs -1) >> cfi->chipshift)
1386 thislen = (1<<cfi->chipshift) - ofs;
1390 mutex_lock(&chip->mutex);
1391 if (chip->state == FL_POINT) {
1392 chip->ref_point_counter--;
1393 if(chip->ref_point_counter == 0)
1394 chip->state = FL_READY;
1396 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1400 put_chip(map, chip, chip->start);
1401 mutex_unlock(&chip->mutex);
/*
 * Read 'len' bytes at 'adr' from a single chip: acquire the chip, force
 * array mode (0xff) if needed, memcpy out, release.
 */
1411 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1413 unsigned long cmd_addr;
1414 struct cfi_private *cfi = map->fldrv_priv;
1419 /* Ensure cmd read/writes are aligned. */
1420 cmd_addr = adr & ~(map_bankwidth(map)-1);
1422 mutex_lock(&chip->mutex);
1423 ret = get_chip(map, chip, cmd_addr, FL_READY);
1425 mutex_unlock(&chip->mutex);
1429 if (chip->state != FL_POINT && chip->state != FL_READY) {
1430 map_write(map, CMD(0xff), cmd_addr);
1432 chip->state = FL_READY;
1435 map_copy_from(map, buf, adr, len);
1437 put_chip(map, chip, cmd_addr);
1439 mutex_unlock(&chip->mutex);
/*
 * mtd->_read implementation: split the request at chip boundaries and
 * call do_read_onechip() per chip.
 * NOTE(review): extract drops the loop tail (buf/ofs/retlen advance).
 */
1443 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1445 struct map_info *map = mtd->priv;
1446 struct cfi_private *cfi = map->fldrv_priv;
1451 /* ofs: offset within the first chip that the first read should start */
1452 chipnum = (from >> cfi->chipshift);
1453 ofs = from - (chipnum << cfi->chipshift);
1456 unsigned long thislen;
1458 if (chipnum >= cfi->numchips)
1461 if ((len + ofs -1) >> cfi->chipshift)
1462 thislen = (1<<cfi->chipshift) - ofs;
1466 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Program a single bus-width word at 'adr'. 'mode' selects normal write
 * (FL_WRITING, command 0x40/0x41) or OTP write (FL_OTP_WRITE, 0xc0).
 * Waits for completion, then decodes status-register error bits:
 * SR.1 (0x02) = locked block, SR.3 (0x08) = VPP low.
 */
1480 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1481 unsigned long adr, map_word datum, int mode)
1483 struct cfi_private *cfi = map->fldrv_priv;
1484 map_word status, write_cmd;
/* 0x41 is the "Performance" variant of Word Program on newer parts */
1491 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1494 write_cmd = CMD(0xc0);
1500 mutex_lock(&chip->mutex);
1501 ret = get_chip(map, chip, adr, mode);
1503 mutex_unlock(&chip->mutex);
1507 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1509 xip_disable(map, chip, adr);
1510 map_write(map, write_cmd, adr);
1511 map_write(map, datum, adr);
1514 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1515 adr, map_bankwidth(map),
1516 chip->word_write_time,
1517 chip->word_write_time_max);
1519 xip_enable(map, chip, adr);
1520 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1524 /* check for errors */
1525 status = map_read(map, adr);
1526 if (map_word_bitsset(map, status, CMD(0x1a))) {
1527 unsigned long chipstatus = MERGESTATUS(status);
/* 0x50 = Clear Status Register, 0x70 = Read Status */
1530 map_write(map, CMD(0x50), adr);
1531 map_write(map, CMD(0x70), adr);
1532 xip_enable(map, chip, adr);
1534 if (chipstatus & 0x02) {
1536 } else if (chipstatus & 0x08) {
1537 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1540 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1547 xip_enable(map, chip, adr);
1548 out: DISABLE_VPP(map);
1549 put_chip(map, chip, adr);
1550 mutex_unlock(&chip->mutex);
/*
 * Word-at-a-time mtd->_write fallback: handle an unaligned head (padded
 * with 0xff so untouched bytes stay erased), the aligned middle, and an
 * unaligned tail; cross chip boundaries as needed.
 */
1555 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1557 struct map_info *map = mtd->priv;
1558 struct cfi_private *cfi = map->fldrv_priv;
1563 chipnum = to >> cfi->chipshift;
1564 ofs = to - (chipnum << cfi->chipshift);
1566 /* If it's not bus-aligned, do the first byte write */
1567 if (ofs & (map_bankwidth(map)-1)) {
1568 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1569 int gap = ofs - bus_ofs;
1573 n = min_t(int, len, map_bankwidth(map)-gap);
1574 datum = map_word_ff(map);
1575 datum = map_word_load_partial(map, datum, buf, gap, n);
1577 ret = do_write_oneword(map, &cfi->chips[chipnum],
1578 bus_ofs, datum, FL_WRITING);
1587 if (ofs >> cfi->chipshift) {
1590 if (chipnum == cfi->numchips)
1595 while(len >= map_bankwidth(map)) {
1596 map_word datum = map_word_load(map, buf);
1598 ret = do_write_oneword(map, &cfi->chips[chipnum],
1599 ofs, datum, FL_WRITING);
1603 ofs += map_bankwidth(map);
1604 buf += map_bankwidth(map);
1605 (*retlen) += map_bankwidth(map);
1606 len -= map_bankwidth(map);
1608 if (ofs >> cfi->chipshift) {
1611 if (chipnum == cfi->numchips)
1616 if (len & (map_bankwidth(map)-1)) {
1619 datum = map_word_ff(map);
1620 datum = map_word_load_partial(map, datum, buf, 0, len);
1622 ret = do_write_oneword(map, &cfi->chips[chipnum],
1623 ofs, datum, FL_WRITING);
/*
 * Buffered program of up to one write-buffer ('wbufsize' bytes, aligned at
 * 'cmd_adr') gathered from the kvec array. Sequence: check/clear stale SR
 * errors, issue Write-to-Buffer (0xe8/0xe9), wait for buffer availability,
 * write the word count and data (edges padded with 0xff), confirm with 0xd0,
 * then wait and decode SR error bits as in do_write_oneword().
 * Advances *pvec/*pvec_seek so the caller can continue mid-iovec.
 * NOTE(review): extract drops lines (gap handling, loop heads, retlen);
 * verify against the full source.
 */
1634 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1635 unsigned long adr, const struct kvec **pvec,
1636 unsigned long *pvec_seek, int len)
1638 struct cfi_private *cfi = map->fldrv_priv;
1639 map_word status, write_cmd, datum;
1640 unsigned long cmd_adr;
1641 int ret, wbufsize, word_gap, words;
1642 const struct kvec *vec;
1643 unsigned long vec_seek;
1644 unsigned long initial_adr;
1645 int initial_len = len;
1647 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1650 cmd_adr = adr & ~(wbufsize-1);
1652 /* Let's determine this according to the interleave only once */
1653 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1655 mutex_lock(&chip->mutex);
1656 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1658 mutex_unlock(&chip->mutex);
1662 XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1664 xip_disable(map, chip, cmd_adr);
1666 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1667 [...], the device will not accept any more Write to Buffer commands".
1668 So we must check here and reset those bits if they're set. Otherwise
1669 we're just pissing in the wind */
1670 if (chip->state != FL_STATUS) {
1671 map_write(map, CMD(0x70), cmd_adr);
1672 chip->state = FL_STATUS;
1674 status = map_read(map, cmd_adr);
1675 if (map_word_bitsset(map, status, CMD(0x30))) {
1676 xip_enable(map, chip, cmd_adr);
1677 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1678 xip_disable(map, chip, cmd_adr);
1679 map_write(map, CMD(0x50), cmd_adr);
1680 map_write(map, CMD(0x70), cmd_adr);
1683 chip->state = FL_WRITING_TO_BUFFER;
1684 map_write(map, write_cmd, cmd_adr);
1685 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1687 /* Argh. Not ready for write to buffer */
1688 map_word Xstatus = map_read(map, cmd_adr);
1689 map_write(map, CMD(0x70), cmd_adr);
1690 chip->state = FL_STATUS;
1691 status = map_read(map, cmd_adr);
1692 map_write(map, CMD(0x50), cmd_adr);
1693 map_write(map, CMD(0x70), cmd_adr);
1694 xip_enable(map, chip, cmd_adr);
1695 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1696 map->name, Xstatus.x[0], status.x[0]);
1700 /* Figure out the number of words to write */
1701 word_gap = (-adr & (map_bankwidth(map)-1));
1702 words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1706 word_gap = map_bankwidth(map) - word_gap;
1708 datum = map_word_ff(map);
1711 /* Write length of data to come */
1712 map_write(map, CMD(words), cmd_adr );
1716 vec_seek = *pvec_seek;
1718 int n = map_bankwidth(map) - word_gap;
1719 if (n > vec->iov_len - vec_seek)
1720 n = vec->iov_len - vec_seek;
/* pad last partial word with 0xff so unwritten bytes stay erased */
1724 if (!word_gap && len < map_bankwidth(map))
1725 datum = map_word_ff(map);
1727 datum = map_word_load_partial(map, datum,
1728 vec->iov_base + vec_seek,
1733 if (!len || word_gap == map_bankwidth(map)) {
1734 map_write(map, datum, adr);
1735 adr += map_bankwidth(map);
1740 if (vec_seek == vec->iov_len) {
1746 *pvec_seek = vec_seek;
/* 0xd0 = Confirm: start programming the buffer contents */
1749 map_write(map, CMD(0xd0), cmd_adr);
1750 chip->state = FL_WRITING;
1752 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1753 initial_adr, initial_len,
1754 chip->buffer_write_time,
1755 chip->buffer_write_time_max);
1757 map_write(map, CMD(0x70), cmd_adr);
1758 chip->state = FL_STATUS;
1759 xip_enable(map, chip, cmd_adr);
1760 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1764 /* check for errors */
1765 status = map_read(map, cmd_adr);
1766 if (map_word_bitsset(map, status, CMD(0x1a))) {
1767 unsigned long chipstatus = MERGESTATUS(status);
1770 map_write(map, CMD(0x50), cmd_adr);
1771 map_write(map, CMD(0x70), cmd_adr);
1772 xip_enable(map, chip, cmd_adr);
1774 if (chipstatus & 0x02) {
1776 } else if (chipstatus & 0x08) {
1777 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1780 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1787 xip_enable(map, chip, cmd_adr);
1788 out: DISABLE_VPP(map);
1789 put_chip(map, chip, cmd_adr);
1790 mutex_unlock(&chip->mutex);
/*
 * mtd->_writev implementation: feed the iovec data to do_write_buffer()
 * one write-buffer-aligned chunk at a time, never crossing a write-block
 * or chip boundary.
 */
1794 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1795 unsigned long count, loff_t to, size_t *retlen)
1797 struct map_info *map = mtd->priv;
1798 struct cfi_private *cfi = map->fldrv_priv;
1799 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1802 unsigned long ofs, vec_seek, i;
1805 for (i = 0; i < count; i++)
1806 len += vecs[i].iov_len;
1811 chipnum = to >> cfi->chipshift;
1812 ofs = to - (chipnum << cfi->chipshift);
1816 /* We must not cross write block boundaries */
1817 int size = wbufsize - (ofs & (wbufsize-1));
1821 ret = do_write_buffer(map, &cfi->chips[chipnum],
1822 ofs, &vecs, &vec_seek, size);
1830 if (ofs >> cfi->chipshift) {
1833 if (chipnum == cfi->numchips)
1837 /* Be nice and reschedule with the chip in a usable state for other
/* mtd->_write via the buffered path: wrap (buf, len) in a 1-entry kvec. */
1846 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1847 size_t len, size_t *retlen, const u_char *buf)
1851 vec.iov_base = (void *) buf;
1854 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
/*
 * Erase one block at 'adr': clear status (0x50), Block Erase (0x20) +
 * Confirm (0xd0), wait, then decode status. SR.5+SR.4 together (0x30)
 * means bad command sequence; SR.1 = locked; SR.3 = bad VPP; a lone
 * SR.5 erase failure is retried a bounded number of times.
 */
1857 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1858 unsigned long adr, int len, void *thunk)
1860 struct cfi_private *cfi = map->fldrv_priv;
1868 mutex_lock(&chip->mutex);
1869 ret = get_chip(map, chip, adr, FL_ERASING);
1871 mutex_unlock(&chip->mutex);
1875 XIP_INVAL_CACHED_RANGE(map, adr, len);
1877 xip_disable(map, chip, adr);
1879 /* Clear the status register first */
1880 map_write(map, CMD(0x50), adr);
1883 map_write(map, CMD(0x20), adr);
1884 map_write(map, CMD(0xD0), adr);
1885 chip->state = FL_ERASING;
1886 chip->erase_suspended = 0;
1888 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1891 chip->erase_time_max);
1893 map_write(map, CMD(0x70), adr);
1894 chip->state = FL_STATUS;
1895 xip_enable(map, chip, adr);
1896 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1900 /* We've broken this before. It doesn't hurt to be safe */
1901 map_write(map, CMD(0x70), adr);
1902 chip->state = FL_STATUS;
1903 status = map_read(map, adr);
1905 /* check for errors */
1906 if (map_word_bitsset(map, status, CMD(0x3a))) {
1907 unsigned long chipstatus = MERGESTATUS(status);
1909 /* Reset the error bits */
1910 map_write(map, CMD(0x50), adr);
1911 map_write(map, CMD(0x70), adr);
1912 xip_enable(map, chip, adr);
1914 if ((chipstatus & 0x30) == 0x30) {
1915 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1917 } else if (chipstatus & 0x02) {
1918 /* Protection bit set */
1920 } else if (chipstatus & 0x8) {
1922 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1924 } else if (chipstatus & 0x20 && retries--) {
1925 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1927 put_chip(map, chip, adr);
1928 mutex_unlock(&chip->mutex);
1931 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1938 xip_enable(map, chip, adr);
1939 out: DISABLE_VPP(map);
1940 put_chip(map, chip, adr);
1941 mutex_unlock(&chip->mutex);
/*
 * mtd->_erase implementation: walk the variable-size erase regions via
 * cfi_varsize_frob() calling do_erase_oneblock() per block, then signal
 * completion to the MTD core.
 */
1945 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1947 unsigned long ofs, len;
1953 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1957 instr->state = MTD_ERASE_DONE;
1958 mtd_erase_callback(instr);
/*
 * mtd->_sync implementation: drive every chip to FL_SYNCING so pending
 * operations finish, then restore each chip's previous state. The second
 * loop walks backwards over only the chips that were acquired.
 */
1963 static void cfi_intelext_sync (struct mtd_info *mtd)
1965 struct map_info *map = mtd->priv;
1966 struct cfi_private *cfi = map->fldrv_priv;
1968 struct flchip *chip;
1971 for (i=0; !ret && i<cfi->numchips; i++) {
1972 chip = &cfi->chips[i];
1974 mutex_lock(&chip->mutex);
1975 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1978 chip->oldstate = chip->state;
1979 chip->state = FL_SYNCING;
1980 /* No need to wake_up() on this state change -
1981 * as the whole point is that nobody can do anything
1982 * with the chip now anyway.
1985 mutex_unlock(&chip->mutex);
1988 /* Unlock the chips again */
1990 for (i--; i >=0; i--) {
1991 chip = &cfi->chips[i];
1993 mutex_lock(&chip->mutex);
1995 if (chip->state == FL_SYNCING) {
1996 chip->state = chip->oldstate;
1997 chip->oldstate = FL_READY;
2000 mutex_unlock(&chip->mutex);
/*
 * Query one block's lock bit: enter Read Identifier mode (0x90) and read
 * the block lock configuration at offset 2 (scaled by interleave *
 * device_type). Returns the raw lock status.
 * NOTE(review): the extract drops lines (e.g. the final return/mask).
 */
2004 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2005 struct flchip *chip,
2007 int len, void *thunk)
2009 struct cfi_private *cfi = map->fldrv_priv;
2010 int status, ofs_factor = cfi->interleave * cfi->device_type;
2013 xip_disable(map, chip, adr+(2*ofs_factor));
2014 map_write(map, CMD(0x90), adr+(2*ofs_factor));
2015 chip->state = FL_JEDEC_QUERY;
2016 status = cfi_read_query(map, adr+(2*ofs_factor));
2017 xip_enable(map, chip, 0);
2021 #ifdef DEBUG_LOCK_BITS
/* Debug-only (DEBUG_LOCK_BITS) wrapper: print one block's lock status. */
2022 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2023 struct flchip *chip,
2025 int len, void *thunk)
2027 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2028 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
/* Thunk values selecting lock vs unlock in do_xxlock_oneblock(). */
2033 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
2034 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Set (0x60,0x01) or clear (0x60,0xd0) one block's lock bit, then wait.
 * Chips with Instant Individual Block Locking (FeatureSupport bit 5)
 * complete immediately (mdelay = 0); otherwise allow up to 1.5 s.
 */
2036 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2037 unsigned long adr, int len, void *thunk)
2039 struct cfi_private *cfi = map->fldrv_priv;
2040 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2046 mutex_lock(&chip->mutex);
2047 ret = get_chip(map, chip, adr, FL_LOCKING);
2049 mutex_unlock(&chip->mutex);
2054 xip_disable(map, chip, adr);
2056 map_write(map, CMD(0x60), adr);
2057 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2058 map_write(map, CMD(0x01), adr);
2059 chip->state = FL_LOCKING;
2060 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2061 map_write(map, CMD(0xD0), adr);
2062 chip->state = FL_UNLOCKING;
2067 * If Instant Individual Block Locking supported then no need
2071 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2072 * lets use a max of 1.5 seconds (1500ms) as timeout.
2074 * See "Clear Block Lock-Bits Time" on page 40 in
2075 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2076 * from February 2003
2078 mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2080 ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2082 map_write(map, CMD(0x70), adr);
2083 chip->state = FL_STATUS;
2084 xip_enable(map, chip, adr);
2085 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2089 xip_enable(map, chip, adr);
2090 out: DISABLE_VPP(map);
2091 put_chip(map, chip, adr);
2092 mutex_unlock(&chip->mutex);
/*
 * mtd->_lock implementation: lock every block in [ofs, ofs+len) via
 * cfi_varsize_frob()/do_xxlock_oneblock(); optional before/after dumps
 * under DEBUG_LOCK_BITS.
 */
2096 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2100 #ifdef DEBUG_LOCK_BITS
2101 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2102 __func__, ofs, len);
2103 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2107 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2108 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2110 #ifdef DEBUG_LOCK_BITS
2111 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2113 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
/* mtd->_unlock implementation: mirror of cfi_intelext_lock() with the
 * UNLOCK thunk. */
2120 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2124 #ifdef DEBUG_LOCK_BITS
2125 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2126 __func__, ofs, len);
2127 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2131 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2132 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2134 #ifdef DEBUG_LOCK_BITS
2135 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2137 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
/* mtd->_is_locked: nonzero lock status from any block in range -> 1. */
2144 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2147 return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2148 ofs, len, NULL) ? 1 : 0;
2151 #ifdef CONFIG_MTD_OTP
/* Operation callback used by cfi_intelext_otp_walk() for each OTP group. */
2153 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2154 u_long data_offset, u_char *buf, u_int size,
2155 u_long prot_offset, u_int groupno, u_int groupsize);
/*
 * Read 'size' bytes of OTP data at chip-relative 'offset': enter Read
 * Identifier mode (0x90) where the protection registers are visible,
 * copy out, and invalidate the CPU cache both before and after so array
 * data and OTP data never alias in cache.
 */
2158 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2159 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2161 struct cfi_private *cfi = map->fldrv_priv;
2164 mutex_lock(&chip->mutex);
2165 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2167 mutex_unlock(&chip->mutex);
2171 /* let's ensure we're not reading back cached data from array mode */
2172 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2174 xip_disable(map, chip, chip->start);
2175 if (chip->state != FL_JEDEC_QUERY) {
2176 map_write(map, CMD(0x90), chip->start);
2177 chip->state = FL_JEDEC_QUERY;
2179 map_copy_from(map, buf, chip->start + offset, size);
2180 xip_enable(map, chip, chip->start);
2182 /* then ensure we don't keep OTP data in the cache */
2183 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2185 put_chip(map, chip, chip->start);
2186 mutex_unlock(&chip->mutex);
/*
 * Program OTP data word-by-word through do_write_oneword() in
 * FL_OTP_WRITE mode, padding partial words with 0xff.
 * NOTE(review): extract drops the loop head and pointer advance lines.
 */
2191 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2192 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2197 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2198 int gap = offset - bus_ofs;
2199 int n = min_t(int, size, map_bankwidth(map)-gap);
2200 map_word datum = map_word_ff(map);
2202 datum = map_word_load_partial(map, datum, buf, gap, n);
2203 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
/*
 * Permanently lock one OTP group by clearing its bit in the Protection
 * Lock Register (writing the PLR via an OTP-mode word write).
 */
2216 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2217 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2219 struct cfi_private *cfi = map->fldrv_priv;
2222 /* make sure area matches group boundaries */
2226 datum = map_word_ff(map);
2227 datum = map_word_clr(map, datum, CMD(1 << grpno));
2228 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
/*
 * Generic walker over the chip's OTP protection registers. Iterates real
 * chips (stepping over interleave-created virtual ones), and within each
 * chip over the regions described in the extended query table, applying
 * 'action' (read/write/lock) or, when action is NULL, filling 'buf' with
 * struct otp_info records (the get_*_prot_info path). 'user_regs' selects
 * the user-programmable registers vs the factory ones.
 * NOTE(review): extract drops many lines (region loop head, from/len
 * bookkeeping) - verify against the full source.
 */
2231 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2232 size_t *retlen, u_char *buf,
2233 otp_op_t action, int user_regs)
2235 struct map_info *map = mtd->priv;
2236 struct cfi_private *cfi = map->fldrv_priv;
2237 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2238 struct flchip *chip;
2239 struct cfi_intelext_otpinfo *otp;
2240 u_long devsize, reg_prot_offset, data_offset;
2241 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2242 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2247 /* Check that we actually have some OTP registers */
2248 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2251 /* we need real chips here not virtual ones */
2252 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2253 chip_step = devsize >> cfi->chipshift;
2256 /* Some chips have OTP located in the _top_ partition only.
2257 For example: Intel 28F256L18T (T means top-parameter device) */
2258 if (cfi->mfr == CFI_MFR_INTEL) {
2263 chip_num = chip_step - 1;
2267 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2268 chip = &cfi->chips[chip_num];
2269 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2271 /* first OTP region */
2273 reg_prot_offset = extp->ProtRegAddr;
2274 reg_fact_groups = 1;
2275 reg_fact_size = 1 << extp->FactProtRegSize;
2276 reg_user_groups = 1;
2277 reg_user_size = 1 << extp->UserProtRegSize;
2280 /* flash geometry fixup */
2281 data_offset = reg_prot_offset + 1;
2282 data_offset *= cfi->interleave * cfi->device_type;
2283 reg_prot_offset *= cfi->interleave * cfi->device_type;
2284 reg_fact_size *= cfi->interleave;
2285 reg_user_size *= cfi->interleave;
2288 groups = reg_user_groups;
2289 groupsize = reg_user_size;
2290 /* skip over factory reg area */
2291 groupno = reg_fact_groups;
2292 data_offset += reg_fact_groups * reg_fact_size;
2294 groups = reg_fact_groups;
2295 groupsize = reg_fact_size;
2299 while (len > 0 && groups > 0) {
2302 * Special case: if action is NULL
2303 * we fill buf with otp_info records.
2305 struct otp_info *otpinfo;
2307 len -= sizeof(struct otp_info);
/* read the Protection Lock Register to report lock state */
2310 ret = do_otp_read(map, chip,
2312 (u_char *)&lockword,
2317 otpinfo = (struct otp_info *)buf;
2318 otpinfo->start = from;
2319 otpinfo->length = groupsize;
2321 !map_word_bitsset(map, lockword,
2324 buf += sizeof(*otpinfo);
2325 *retlen += sizeof(*otpinfo);
2326 } else if (from >= groupsize) {
2328 data_offset += groupsize;
2330 int size = groupsize;
2331 data_offset += from;
2336 ret = action(map, chip, data_offset,
2337 buf, size, reg_prot_offset,
2338 groupno, groupsize);
2344 data_offset += size;
2350 /* next OTP region */
2351 if (++field == extp->NumProtectionFields)
2353 reg_prot_offset = otp->ProtRegAddr;
2354 reg_fact_groups = otp->FactGroups;
2355 reg_fact_size = 1 << otp->FactProtRegSize;
2356 reg_user_groups = otp->UserGroups;
2357 reg_user_size = 1 << otp->UserProtRegSize;
/* mtd->_read_fact_prot_reg: read factory OTP registers (user_regs = 0). */
2365 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2366 size_t len, size_t *retlen,
2369 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2370 buf, do_otp_read, 0);
/* mtd->_read_user_prot_reg: read user OTP registers (user_regs = 1). */
2373 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2374 size_t len, size_t *retlen,
2377 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2378 buf, do_otp_read, 1);
/* mtd->_write_user_prot_reg: program user OTP registers. */
2381 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2382 size_t len, size_t *retlen,
2385 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2386 buf, do_otp_write, 1);
/* mtd->_lock_user_prot_reg: permanently lock user OTP groups. */
2389 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2390 loff_t from, size_t len)
2393 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2394 NULL, do_otp_lock, 1);
/* mtd->_get_fact_prot_info: enumerate factory OTP groups (action = NULL). */
2397 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2398 size_t *retlen, struct otp_info *buf)
2401 return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
/* mtd->_get_user_prot_info: enumerate user OTP groups (action = NULL). */
2405 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2406 size_t *retlen, struct otp_info *buf)
2408 return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
/*
 * Snapshot every block's lock bit into each erase region's lockmap bitmap
 * before suspend, so cfi_intelext_restore_locks() can re-unlock blocks on
 * power-up-locking flash after resume.
 */
2414 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2416 struct mtd_erase_region_info *region;
2417 int block, status, i;
2421 for (i = 0; i < mtd->numeraseregions; i++) {
2422 region = &mtd->eraseregions[i];
2423 if (!region->lockmap)
2426 for (block = 0; block < region->numblocks; block++){
2427 len = region->erasesize;
2428 adr = region->offset + block * len;
2430 status = cfi_varsize_frob(mtd,
2431 do_getlockstatus_oneblock, adr, len, NULL);
2433 set_bit(block, region->lockmap);
2435 clear_bit(block, region->lockmap);
/*
 * mtd->_suspend implementation: save lock bits when the flash auto-locks
 * on power-up, then move each idle chip to FL_PM_SUSPENDED (forcing array
 * mode with 0xFF first). Refuses - and rolls back chips already suspended
 * - if any chip has a pending or active operation.
 */
2440 static int cfi_intelext_suspend(struct mtd_info *mtd)
2442 struct map_info *map = mtd->priv;
2443 struct cfi_private *cfi = map->fldrv_priv;
2444 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2446 struct flchip *chip;
2449 if ((mtd->flags & MTD_POWERUP_LOCK)
2450 && extp && (extp->FeatureSupport & (1 << 5)))
2451 cfi_intelext_save_locks(mtd);
2453 for (i=0; !ret && i<cfi->numchips; i++) {
2454 chip = &cfi->chips[i];
2456 mutex_lock(&chip->mutex);
2458 switch (chip->state) {
2462 case FL_JEDEC_QUERY:
2463 if (chip->oldstate == FL_READY) {
2464 /* place the chip in a known state before suspend */
2465 map_write(map, CMD(0xFF), cfi->chips[i].start);
2466 chip->oldstate = chip->state;
2467 chip->state = FL_PM_SUSPENDED;
2468 /* No need to wake_up() on this state change -
2469 * as the whole point is that nobody can do anything
2470 * with the chip now anyway.
2473 /* There seems to be an operation pending. We must wait for it. */
2474 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2479 /* Should we actually wait? Once upon a time these routines weren't
2480 allowed to. Or should we return -EAGAIN, because the upper layers
2481 ought to have already shut down anything which was using the device
2482 anyway? The latter for now. */
2483 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2485 case FL_PM_SUSPENDED:
2488 mutex_unlock(&chip->mutex);
2491 /* Unlock the chips again */
2494 for (i--; i >=0; i--) {
2495 chip = &cfi->chips[i];
2497 mutex_lock(&chip->mutex);
2499 if (chip->state == FL_PM_SUSPENDED) {
2500 /* No need to force it into a known state here,
2501 because we're returning failure, and it didn't
2503 chip->state = chip->oldstate;
2504 chip->oldstate = FL_READY;
2507 mutex_unlock(&chip->mutex);
2514 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2516 struct mtd_erase_region_info *region;
2521 for (i = 0; i < mtd->numeraseregions; i++) {
2522 region = &mtd->eraseregions[i];
2523 if (!region->lockmap)
2526 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2527 len = region->erasesize;
2528 adr = region->offset + block * len;
2529 cfi_intelext_unlock(mtd, adr, len);
/*
 * mtd resume hook: bring every suspended chip back to FL_READY.
 * The chip may have been power cycled while suspended, so it is
 * explicitly put back into read-array mode (0xFF) rather than
 * trusting its previous state.
 * NOTE(review): waiters on chip->wq are presumably woken elsewhere /
 * on a line not visible in this view — confirm against upstream.
 */
2534 static void cfi_intelext_resume(struct mtd_info *mtd)
2536 struct map_info *map = mtd->priv;
2537 struct cfi_private *cfi = map->fldrv_priv;
2538 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2540 struct flchip *chip;
2542 for (i=0; i<cfi->numchips; i++) {
2544 chip = &cfi->chips[i];
2546 mutex_lock(&chip->mutex);
2548 /* Go to known state. Chip may have been power cycled */
2549 if (chip->state == FL_PM_SUSPENDED) {
2550 map_write(map, CMD(0xFF), cfi->chips[i].start);
2551 chip->oldstate = chip->state = FL_READY;
2555 mutex_unlock(&chip->mutex);
/* Mirror of the save done in suspend: re-apply soft unlocks on
 * parts that auto-lock at power-up (FeatureSupport bit 5). */
2558 if ((mtd->flags & MTD_POWERUP_LOCK)
2559 && extp && (extp->FeatureSupport & (1 << 5)))
2560 cfi_intelext_restore_locks(mtd);
2563 static int cfi_intelext_reset(struct mtd_info *mtd)
2565 struct map_info *map = mtd->priv;
2566 struct cfi_private *cfi = map->fldrv_priv;
2569 for (i=0; i < cfi->numchips; i++) {
2570 struct flchip *chip = &cfi->chips[i];
2572 /* force the completion of any ongoing operation
2573 and switch to array mode so any bootloader in
2574 flash is accessible for soft reboot. */
2575 mutex_lock(&chip->mutex);
2576 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2578 map_write(map, CMD(0xff), chip->start);
2579 chip->state = FL_SHUTDOWN;
2580 put_chip(map, chip, chip->start);
2582 mutex_unlock(&chip->mutex);
2588 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2591 struct mtd_info *mtd;
2593 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2594 cfi_intelext_reset(mtd);
2598 static void cfi_intelext_destroy(struct mtd_info *mtd)
2600 struct map_info *map = mtd->priv;
2601 struct cfi_private *cfi = map->fldrv_priv;
2602 struct mtd_erase_region_info *region;
2604 cfi_intelext_reset(mtd);
2605 unregister_reboot_notifier(&mtd->reboot_notifier);
2606 kfree(cfi->cmdset_priv);
2608 kfree(cfi->chips[0].priv);
2610 for (i = 0; i < mtd->numeraseregions; i++) {
2611 region = &mtd->eraseregions[i];
2612 if (region->lockmap)
2613 kfree(region->lockmap);
2615 kfree(mtd->eraseregions);
/* Module metadata.  The extra MODULE_ALIAS entries let this driver
 * also be auto-loaded for maps that probe with the related
 * Intel-compatible CFI command-set IDs 0x0003 and 0x0200. */
2618 MODULE_LICENSE("GPL");
2619 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2620 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2621 MODULE_ALIAS("cfi_cmdset_0003");
2622 MODULE_ALIAS("cfi_cmdset_0200");