/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely that the device IDs are as well.
         * This table picks out all cases where we know that to be
         * true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
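
/*
 * Illustrative sketch (not built): cfi_fixup() walks one of the tables
 * above and invokes every entry whose manufacturer/device ID matches the
 * probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  A
 * hypothetical board-specific quirk would be wired up like this (the
 * function, table and device ID below are made up for illustration):
 */
#if 0
static void fixup_my_quirky_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* e.g. pretend buffered writes are unsupported on this part */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static struct cfi_fixup my_fixup_table[] = {
        { CFI_MFR_INTEL, 0x1234, fixup_my_quirky_chip }, /* hypothetical ID */
        { 0, 0, NULL }
};
#endif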

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
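
/*
 * For reference, the variable-length 'extra' region parsed above is laid
 * out as follows (derived from the parsing code; all offsets relative to
 * the end of the fixed cfi_pri_intelext fields):
 *
 *   - OTP info: (NumProtectionFields - 1) * sizeof(struct cfi_intelext_otpinfo)
 *   - Burst Read info (version >= '1'): two fixed bytes, the second of
 *     which counts additional burst-read bytes that follow
 *   - Partition info (version >= '3'): a one-byte region count, a two-byte
 *     partregion size field (version >= '4' only), then one
 *     cfi_intelext_regioninfo per region, each followed by
 *     (NumBlockTypes - 1) cfi_intelext_blockinfo records, and finally one
 *     cfi_intelext_programming_regioninfo (version >= '4' only)
 */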

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
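
/*
 * Illustrative sketch (not built): this command-set driver is not used
 * directly.  A map driver ioremaps the flash window and probes it; when
 * the CFI primary vendor command set is 0x0001/0x0003/0x0200, the probe
 * ends up calling cfi_cmdset_0001() above.  Everything below except
 * do_map_probe(), simple_map_init() and mtd_device_register() is a made-up
 * example (names, addresses and sizes are hypothetical):
 */
#if 0
static struct map_info my_map = {
        .name      = "my-nor",
        .phys      = 0x10000000,        /* board-specific example value */
        .size      = 0x00800000,        /* 8 MiB window (example value) */
        .bankwidth = 2,                 /* 16-bit bus (example value) */
};

static int __init my_nor_init(void)
{
        struct mtd_info *mtd;

        my_map.virt = ioremap(my_map.phys, my_map.size);
        if (!my_map.virt)
                return -ENOMEM;
        simple_map_init(&my_map);

        /* cfi_probe dispatches to cfi_cmdset_0001() for Intel-style chips */
        mtd = do_map_probe("cfi_probe", &my_map);
        if (!mtd) {
                iounmap(my_map.virt);
                return -ENXIO;
        }
        return mtd_device_register(mtd, NULL, 0);
}
#endif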

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
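                /*
                 * CFI encodes each erase region in one 32-bit word: bits
                 * 0-15 hold the number of blocks minus one, bits 16-31
                 * the block size in units of 256 bytes.  E.g. 0x0100003f
                 * describes 64 blocks of 64 KiB (times the interleave).
                 */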
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
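
/*
 * Worked example (illustrative numbers): a 64 MiB chip (cfi->chipshift =
 * 26) advertising 4 identical hardware partitions yields
 * partshift = 26 - __ffs(4) = 24, i.e. four virtual "chips" of 16 MiB
 * each, every one with its own flchip state machine while sharing a
 * single flchip_shared for write/erase arbitration.
 */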

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations, which are global to the real
                 * chip and not per partition.  So let's fight it out in
                 * the context of the partition which currently has
                 * authority over the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}
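
/*
 * Illustrative sketch (not built): the canonical calling pattern around
 * get_chip()/put_chip(), as used by the operation routines later in this
 * file.  Every flash access is bracketed this way, under chip->mutex, so
 * that suspend/resume arbitration happens in one place (the function name
 * is hypothetical):
 */
#if 0
static int do_some_operation(struct map_info *map, struct flchip *chip,
                             unsigned long adr)
{
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        /* ... issue the command sequence and wait for completion ... */
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
        return 0;
}
#endif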

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}
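
/*
 * Illustrative sketch (not built): how the write/erase paths bracket the
 * window where the flash leaves array mode.  Only __xipram code may run
 * between the pair (the function name and command below are examples):
 */
#if 0
static int __xipram do_xip_guarded_op(struct map_info *map,
                                      struct flchip *chip, unsigned long adr)
{
        xip_disable(map, chip, adr);    /* IRQs off before leaving array mode */
        map_write(map, CMD(0x40), adr); /* e.g. a word program command */
        /* ... poll for completion via xip_wait_for_operation() ... */
        xip_enable(map, chip, adr);     /* back to array mode, IRQs on */
        return 0;
}
#endif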

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        mutex_unlock(&chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        mutex_lock(&chip->mutex);

        timeo = chip_op_time_max;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                if (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        continue;
                }

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

1238                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1239                         /* Erase suspend occurred while we slept: reset the timeout */
1240                         timeo = reset_timeo;
1241                         chip->erase_suspended = 0;
1242                 }
1243                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1244                         /* Write suspend occurred while we slept: reset the timeout */
1245                         timeo = reset_timeo;
1246                         chip->write_suspended = 0;
1247                 }
1248                 if (!timeo) {
1249                         map_write(map, CMD(0x70), cmd_adr);
1250                         chip->state = FL_STATUS;
1251                         return -ETIME;
1252                 }
1253
1254                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1255                 mutex_unlock(&chip->mutex);
1256                 if (sleep_time >= 1000000/HZ) {
1257                         /*
1258                          * The remaining delay (half of the typical
1259                          * operation time) is long enough to sleep
1260                          * through instead of busy-waiting.
1261                          */
1262                         msleep(sleep_time/1000);
1263                         timeo -= sleep_time;
1264                         sleep_time = 1000000/HZ;
1265                 } else {
1266                         udelay(1);
1267                         cond_resched();
1268                         timeo--;
1269                 }
1270                 mutex_lock(&chip->mutex);
1271         }
1272
1273         /* Done and happy. */
1274         chip->state = FL_STATUS;
1275         return 0;
1276 }
1277
1278 #endif
1279
1280 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1281         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1282
1283
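     /*
      * Switch one chip into array (read) mode and mark it FL_POINT so the
      * caller can access the mapping directly; ref_point_counter allows
      * nested point/unpoint pairs on the same chip.
      */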
1284 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1285 {
1286         unsigned long cmd_addr;
1287         struct cfi_private *cfi = map->fldrv_priv;
1288         int ret = 0;
1289
1290         adr += chip->start;
1291
1292         /* Ensure cmd read/writes are aligned. */
1293         cmd_addr = adr & ~(map_bankwidth(map)-1);
1294
1295         mutex_lock(&chip->mutex);
1296
1297         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1298
1299         if (!ret) {
1300                 if (chip->state != FL_POINT && chip->state != FL_READY)
1301                         map_write(map, CMD(0xff), cmd_addr);
1302
1303                 chip->state = FL_POINT;
1304                 chip->ref_point_counter++;
1305         }
1306         mutex_unlock(&chip->mutex);
1307
1308         return ret;
1309 }
1310
1311 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1312                 size_t *retlen, void **virt, resource_size_t *phys)
1313 {
1314         struct map_info *map = mtd->priv;
1315         struct cfi_private *cfi = map->fldrv_priv;
1316         unsigned long ofs, last_end = 0;
1317         int chipnum;
1318         int ret = 0;
1319
1320         if (!map->virt)
1321                 return -EINVAL;
1322
1323         /* Now lock the chip(s) to POINT state */
1324
1325         /* ofs: offset within the first chip that the first read should start */
1326         chipnum = (from >> cfi->chipshift);
1327         ofs = from - (chipnum << cfi->chipshift);
1328
1329         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1330         if (phys)
1331                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1332
1333         while (len) {
1334                 unsigned long thislen;
1335
1336                 if (chipnum >= cfi->numchips)
1337                         break;
1338
1339                 /* We cannot point across chips that are virtually disjoint */
1340                 if (!last_end)
1341                         last_end = cfi->chips[chipnum].start;
1342                 else if (cfi->chips[chipnum].start != last_end)
1343                         break;
1344
1345                 if ((len + ofs - 1) >> cfi->chipshift)
1346                         thislen = (1 << cfi->chipshift) - ofs;
1347                 else
1348                         thislen = len;
1349
1350                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1351                 if (ret)
1352                         break;
1353
1354                 *retlen += thislen;
1355                 len -= thislen;
1356
1357                 ofs = 0;
1358                 last_end += 1 << cfi->chipshift;
1359                 chipnum++;
1360         }
1361         return 0;
1362 }
1363
1364 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1365 {
1366         struct map_info *map = mtd->priv;
1367         struct cfi_private *cfi = map->fldrv_priv;
1368         unsigned long ofs;
1369         int chipnum, err = 0;
1370
1371         /* Now unlock the chip(s) POINT state */
1372
1373         /* ofs: offset within the first chip that the first read should start */
1374         chipnum = (from >> cfi->chipshift);
1375         ofs = from - (chipnum << cfi->chipshift);
1376
1377         while (len && !err) {
1378                 unsigned long thislen;
1379                 struct flchip *chip;
1380
1381                 if (chipnum >= cfi->numchips)
1382                         break;
1383                 chip = &cfi->chips[chipnum];
1384
1385                 if ((len + ofs - 1) >> cfi->chipshift)
1386                         thislen = (1 << cfi->chipshift) - ofs;
1387                 else
1388                         thislen = len;
1389
1390                 mutex_lock(&chip->mutex);
1391                 if (chip->state == FL_POINT) {
1392                         chip->ref_point_counter--;
1393                         if (chip->ref_point_counter == 0)
1394                                 chip->state = FL_READY;
1395                 } else {
1396                         printk(KERN_ERR "%s: Error: unpoint called on a non-pointed region\n", map->name);
1397                         err = -EINVAL;
1398                 }
1399
1400                 put_chip(map, chip, chip->start);
1401                 mutex_unlock(&chip->mutex);
1402
1403                 len -= thislen;
1404                 ofs = 0;
1405                 chipnum++;
1406         }
1407
1408         return err;
1409 }
1410
1411 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1412 {
1413         unsigned long cmd_addr;
1414         struct cfi_private *cfi = map->fldrv_priv;
1415         int ret;
1416
1417         adr += chip->start;
1418
1419         /* Ensure cmd read/writes are aligned. */
1420         cmd_addr = adr & ~(map_bankwidth(map)-1);
1421
1422         mutex_lock(&chip->mutex);
1423         ret = get_chip(map, chip, cmd_addr, FL_READY);
1424         if (ret) {
1425                 mutex_unlock(&chip->mutex);
1426                 return ret;
1427         }
1428
1429         if (chip->state != FL_POINT && chip->state != FL_READY) {
1430                 map_write(map, CMD(0xff), cmd_addr);
1431
1432                 chip->state = FL_READY;
1433         }
1434
1435         map_copy_from(map, buf, adr, len);
1436
1437         put_chip(map, chip, cmd_addr);
1438
1439         mutex_unlock(&chip->mutex);
1440         return 0;
1441 }
1442
1443 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1444 {
1445         struct map_info *map = mtd->priv;
1446         struct cfi_private *cfi = map->fldrv_priv;
1447         unsigned long ofs;
1448         int chipnum;
1449         int ret = 0;
1450
1451         /* ofs: offset within the first chip that the first read should start */
1452         chipnum = (from >> cfi->chipshift);
1453         ofs = from - (chipnum << cfi->chipshift);
1454
1455         while (len) {
1456                 unsigned long thislen;
1457
1458                 if (chipnum >= cfi->numchips)
1459                         break;
1460
1461                 if ((len + ofs - 1) >> cfi->chipshift)
1462                         thislen = (1 << cfi->chipshift) - ofs;
1463                 else
1464                         thislen = len;
1465
1466                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1467                 if (ret)
1468                         break;
1469
1470                 *retlen += thislen;
1471                 len -= thislen;
1472                 buf += thislen;
1473
1474                 ofs = 0;
1475                 chipnum++;
1476         }
1477         return ret;
1478 }
1479
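     /*
      * Program one bankwidth-wide word: issue the word-program setup
      * (0x40, or 0x41 on Performance chips; 0xc0 for protection-register
      * writes), write the datum, poll for completion, and decode the
      * status register's error bits (SR.1 = block locked, SR.3 = VPP
      * error).
      */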
1480 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1481                                      unsigned long adr, map_word datum, int mode)
1482 {
1483         struct cfi_private *cfi = map->fldrv_priv;
1484         map_word status, write_cmd;
1485         int ret = 0;
1486
1487         adr += chip->start;
1488
1489         switch (mode) {
1490         case FL_WRITING:
1491                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1492                 break;
1493         case FL_OTP_WRITE:
1494                 write_cmd = CMD(0xc0);
1495                 break;
1496         default:
1497                 return -EINVAL;
1498         }
1499
1500         mutex_lock(&chip->mutex);
1501         ret = get_chip(map, chip, adr, mode);
1502         if (ret) {
1503                 mutex_unlock(&chip->mutex);
1504                 return ret;
1505         }
1506
1507         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1508         ENABLE_VPP(map);
1509         xip_disable(map, chip, adr);
1510         map_write(map, write_cmd, adr);
1511         map_write(map, datum, adr);
1512         chip->state = mode;
1513
1514         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1515                                    adr, map_bankwidth(map),
1516                                    chip->word_write_time,
1517                                    chip->word_write_time_max);
1518         if (ret) {
1519                 xip_enable(map, chip, adr);
1520                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1521                 goto out;
1522         }
1523
1524         /* check for errors */
1525         status = map_read(map, adr);
1526         if (map_word_bitsset(map, status, CMD(0x1a))) {
1527                 unsigned long chipstatus = MERGESTATUS(status);
1528
1529                 /* reset status */
1530                 map_write(map, CMD(0x50), adr);
1531                 map_write(map, CMD(0x70), adr);
1532                 xip_enable(map, chip, adr);
1533
1534                 if (chipstatus & 0x02) {
1535                         ret = -EROFS;
1536                 } else if (chipstatus & 0x08) {
1537                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1538                         ret = -EIO;
1539                 } else {
1540                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1541                         ret = -EINVAL;
1542                 }
1543
1544                 goto out;
1545         }
1546
1547         xip_enable(map, chip, adr);
1548  out:   DISABLE_VPP(map);
1549         put_chip(map, chip, adr);
1550         mutex_unlock(&chip->mutex);
1551         return ret;
1552 }
1553
1554
1555 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1556 {
1557         struct map_info *map = mtd->priv;
1558         struct cfi_private *cfi = map->fldrv_priv;
1559         int ret = 0;
1560         int chipnum;
1561         unsigned long ofs;
1562
1563         chipnum = to >> cfi->chipshift;
1564         ofs = to - (chipnum << cfi->chipshift);
1565
1566         /* If it's not bus-aligned, do the first byte write */
1567         if (ofs & (map_bankwidth(map)-1)) {
1568                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1569                 int gap = ofs - bus_ofs;
1570                 int n;
1571                 map_word datum;
1572
1573                 n = min_t(int, len, map_bankwidth(map)-gap);
1574                 datum = map_word_ff(map);
1575                 datum = map_word_load_partial(map, datum, buf, gap, n);
1576
1577                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1578                                                bus_ofs, datum, FL_WRITING);
1579                 if (ret)
1580                         return ret;
1581
1582                 len -= n;
1583                 ofs += n;
1584                 buf += n;
1585                 (*retlen) += n;
1586
1587                 if (ofs >> cfi->chipshift) {
1588                         chipnum++;
1589                         ofs = 0;
1590                         if (chipnum == cfi->numchips)
1591                                 return 0;
1592                 }
1593         }
1594
1595         while (len >= map_bankwidth(map)) {
1596                 map_word datum = map_word_load(map, buf);
1597
1598                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1599                                        ofs, datum, FL_WRITING);
1600                 if (ret)
1601                         return ret;
1602
1603                 ofs += map_bankwidth(map);
1604                 buf += map_bankwidth(map);
1605                 (*retlen) += map_bankwidth(map);
1606                 len -= map_bankwidth(map);
1607
1608                 if (ofs >> cfi->chipshift) {
1609                         chipnum++;
1610                         ofs = 0;
1611                         if (chipnum == cfi->numchips)
1612                                 return 0;
1613                 }
1614         }
1615
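             /* Finally, program any sub-bankwidth tail, padded out with 0xff */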
1616         if (len & (map_bankwidth(map)-1)) {
1617                 map_word datum;
1618
1619                 datum = map_word_ff(map);
1620                 datum = map_word_load_partial(map, datum, buf, 0, len);
1621
1622                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1623                                        ofs, datum, FL_WRITING);
1624                 if (ret)
1625                         return ret;
1626
1627                 (*retlen) += len;
1628         }
1629
1630         return 0;
1631 }
1632
1633
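     /*
      * Program up to one write buffer full of data taken from a kvec
      * array: issue the buffer-write setup (0xe8/0xe9) and wait for the
      * chip to report readiness, write the word count and the data words,
      * send the 0xd0 confirm, then poll the status register and decode
      * any error bits.
      */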
1634 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1635                                     unsigned long adr, const struct kvec **pvec,
1636                                     unsigned long *pvec_seek, int len)
1637 {
1638         struct cfi_private *cfi = map->fldrv_priv;
1639         map_word status, write_cmd, datum;
1640         unsigned long cmd_adr;
1641         int ret, wbufsize, word_gap, words;
1642         const struct kvec *vec;
1643         unsigned long vec_seek;
1644         unsigned long initial_adr;
1645         int initial_len = len;
1646
1647         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1648         adr += chip->start;
1649         initial_adr = adr;
1650         cmd_adr = adr & ~(wbufsize-1);
1651
1652         /* Determine the write-buffer command for this interleave only once */
1653         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1654
1655         mutex_lock(&chip->mutex);
1656         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1657         if (ret) {
1658                 mutex_unlock(&chip->mutex);
1659                 return ret;
1660         }
1661
1662         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1663         ENABLE_VPP(map);
1664         xip_disable(map, chip, cmd_adr);
1665
1666         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1667            [...], the device will not accept any more Write to Buffer commands".
1668            So we must check here and reset those bits if they're set. Otherwise
1669            we're just pissing in the wind */
1670         if (chip->state != FL_STATUS) {
1671                 map_write(map, CMD(0x70), cmd_adr);
1672                 chip->state = FL_STATUS;
1673         }
1674         status = map_read(map, cmd_adr);
1675         if (map_word_bitsset(map, status, CMD(0x30))) {
1676                 xip_enable(map, chip, cmd_adr);
1677                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1678                 xip_disable(map, chip, cmd_adr);
1679                 map_write(map, CMD(0x50), cmd_adr);
1680                 map_write(map, CMD(0x70), cmd_adr);
1681         }
1682
1683         chip->state = FL_WRITING_TO_BUFFER;
1684         map_write(map, write_cmd, cmd_adr);
1685         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1686         if (ret) {
1687                 /* Argh. Not ready for write to buffer */
1688                 map_word Xstatus = map_read(map, cmd_adr);
1689                 map_write(map, CMD(0x70), cmd_adr);
1690                 chip->state = FL_STATUS;
1691                 status = map_read(map, cmd_adr);
1692                 map_write(map, CMD(0x50), cmd_adr);
1693                 map_write(map, CMD(0x70), cmd_adr);
1694                 xip_enable(map, chip, cmd_adr);
1695                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1696                                 map->name, Xstatus.x[0], status.x[0]);
1697                 goto out;
1698         }
1699
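             /*
              * The buffer must be filled in bankwidth-aligned words.  If
              * adr is unaligned, back up to the previous word boundary
              * and pad the gap with 0xff: programming can only clear
              * bits, so the 0xff padding leaves those cells unchanged.
              */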
1700         /* Figure out the number of words to write */
1701         word_gap = (-adr & (map_bankwidth(map)-1));
1702         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1703         if (!word_gap) {
1704                 words--;
1705         } else {
1706                 word_gap = map_bankwidth(map) - word_gap;
1707                 adr -= word_gap;
1708                 datum = map_word_ff(map);
1709         }
1710
1711         /* Write length of data to come */
1712         map_write(map, CMD(words), cmd_adr);
1713
1714         /* Write data */
1715         vec = *pvec;
1716         vec_seek = *pvec_seek;
1717         do {
1718                 int n = map_bankwidth(map) - word_gap;
1719                 if (n > vec->iov_len - vec_seek)
1720                         n = vec->iov_len - vec_seek;
1721                 if (n > len)
1722                         n = len;
1723
1724                 if (!word_gap && len < map_bankwidth(map))
1725                         datum = map_word_ff(map);
1726
1727                 datum = map_word_load_partial(map, datum,
1728                                               vec->iov_base + vec_seek,
1729                                               word_gap, n);
1730
1731                 len -= n;
1732                 word_gap += n;
1733                 if (!len || word_gap == map_bankwidth(map)) {
1734                         map_write(map, datum, adr);
1735                         adr += map_bankwidth(map);
1736                         word_gap = 0;
1737                 }
1738
1739                 vec_seek += n;
1740                 if (vec_seek == vec->iov_len) {
1741                         vec++;
1742                         vec_seek = 0;
1743                 }
1744         } while (len);
1745         *pvec = vec;
1746         *pvec_seek = vec_seek;
1747
1748         /* GO GO GO */
1749         map_write(map, CMD(0xd0), cmd_adr);
1750         chip->state = FL_WRITING;
1751
1752         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1753                                    initial_adr, initial_len,
1754                                    chip->buffer_write_time,
1755                                    chip->buffer_write_time_max);
1756         if (ret) {
1757                 map_write(map, CMD(0x70), cmd_adr);
1758                 chip->state = FL_STATUS;
1759                 xip_enable(map, chip, cmd_adr);
1760                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1761                 goto out;
1762         }
1763
1764         /* check for errors */
1765         status = map_read(map, cmd_adr);
1766         if (map_word_bitsset(map, status, CMD(0x1a))) {
1767                 unsigned long chipstatus = MERGESTATUS(status);
1768
1769                 /* reset status */
1770                 map_write(map, CMD(0x50), cmd_adr);
1771                 map_write(map, CMD(0x70), cmd_adr);
1772                 xip_enable(map, chip, cmd_adr);
1773
1774                 if (chipstatus & 0x02) {
1775                         ret = -EROFS;
1776                 } else if (chipstatus & 0x08) {
1777                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1778                         ret = -EIO;
1779                 } else {
1780                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1781                         ret = -EINVAL;
1782                 }
1783
1784                 goto out;
1785         }
1786
1787         xip_enable(map, chip, cmd_adr);
1788  out:   DISABLE_VPP(map);
1789         put_chip(map, chip, cmd_adr);
1790         mutex_unlock(&chip->mutex);
1791         return ret;
1792 }
1793
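     /*
      * Gather the kvec array into flash write buffers: each chunk is
      * sized so it never crosses a write-buffer boundary, and
      * do_write_buffer() consumes the iovecs incrementally via
      * vec/vec_seek.
      */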
1794 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1795                                 unsigned long count, loff_t to, size_t *retlen)
1796 {
1797         struct map_info *map = mtd->priv;
1798         struct cfi_private *cfi = map->fldrv_priv;
1799         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1800         int ret = 0;
1801         int chipnum;
1802         unsigned long ofs, vec_seek, i;
1803         size_t len = 0;
1804
1805         for (i = 0; i < count; i++)
1806                 len += vecs[i].iov_len;
1807
1808         if (!len)
1809                 return 0;
1810
1811         chipnum = to >> cfi->chipshift;
1812         ofs = to - (chipnum << cfi->chipshift);
1813         vec_seek = 0;
1814
1815         do {
1816                 /* We must not cross write block boundaries */
1817                 int size = wbufsize - (ofs & (wbufsize-1));
1818
1819                 if (size > len)
1820                         size = len;
1821                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1822                                       ofs, &vecs, &vec_seek, size);
1823                 if (ret)
1824                         return ret;
1825
1826                 ofs += size;
1827                 (*retlen) += size;
1828                 len -= size;
1829
1830                 if (ofs >> cfi->chipshift) {
1831                         chipnum++;
1832                         ofs = 0;
1833                         if (chipnum == cfi->numchips)
1834                                 return 0;
1835                 }
1836
1837                 /* Be nice and reschedule with the chip in a usable state for other
1838                    processes. */
1839                 cond_resched();
1840
1841         } while (len);
1842
1843         return 0;
1844 }
1845
1846 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1847                                        size_t len, size_t *retlen, const u_char *buf)
1848 {
1849         struct kvec vec;
1850
1851         vec.iov_base = (void *) buf;
1852         vec.iov_len = len;
1853
1854         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1855 }
1856
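     /*
      * Erase one block: clear the status register, issue erase setup
      * (0x20) plus confirm (0xd0), then poll for completion.  Erase
      * failures (SR.5) are retried up to three times; a locked block
      * (SR.1) yields -EROFS and a VPP fault (SR.3) yields -EIO.
      */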
1857 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1858                                       unsigned long adr, int len, void *thunk)
1859 {
1860         struct cfi_private *cfi = map->fldrv_priv;
1861         map_word status;
1862         int retries = 3;
1863         int ret;
1864
1865         adr += chip->start;
1866
1867  retry:
1868         mutex_lock(&chip->mutex);
1869         ret = get_chip(map, chip, adr, FL_ERASING);
1870         if (ret) {
1871                 mutex_unlock(&chip->mutex);
1872                 return ret;
1873         }
1874
1875         XIP_INVAL_CACHED_RANGE(map, adr, len);
1876         ENABLE_VPP(map);
1877         xip_disable(map, chip, adr);
1878
1879         /* Clear the status register first */
1880         map_write(map, CMD(0x50), adr);
1881
1882         /* Now erase */
1883         map_write(map, CMD(0x20), adr);
1884         map_write(map, CMD(0xD0), adr);
1885         chip->state = FL_ERASING;
1886         chip->erase_suspended = 0;
1887
1888         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1889                                    adr, len,
1890                                    chip->erase_time,
1891                                    chip->erase_time_max);
1892         if (ret) {
1893                 map_write(map, CMD(0x70), adr);
1894                 chip->state = FL_STATUS;
1895                 xip_enable(map, chip, adr);
1896                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1897                 goto out;
1898         }
1899
1900         /* We've broken this before. It doesn't hurt to be safe */
1901         map_write(map, CMD(0x70), adr);
1902         chip->state = FL_STATUS;
1903         status = map_read(map, adr);
1904
1905         /* check for errors */
1906         if (map_word_bitsset(map, status, CMD(0x3a))) {
1907                 unsigned long chipstatus = MERGESTATUS(status);
1908
1909                 /* Reset the error bits */
1910                 map_write(map, CMD(0x50), adr);
1911                 map_write(map, CMD(0x70), adr);
1912                 xip_enable(map, chip, adr);
1913
1914                 if ((chipstatus & 0x30) == 0x30) {
1915                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1916                         ret = -EINVAL;
1917                 } else if (chipstatus & 0x02) {
1918                         /* Protection bit set */
1919                         ret = -EROFS;
1920                 } else if (chipstatus & 0x8) {
1921                         /* Voltage */
1922                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1923                         ret = -EIO;
1924                 } else if (chipstatus & 0x20 && retries--) {
1925                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1926                         DISABLE_VPP(map);
1927                         put_chip(map, chip, adr);
1928                         mutex_unlock(&chip->mutex);
1929                         goto retry;
1930                 } else {
1931                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1932                         ret = -EIO;
1933                 }
1934
1935                 goto out;
1936         }
1937
1938         xip_enable(map, chip, adr);
1939  out:   DISABLE_VPP(map);
1940         put_chip(map, chip, adr);
1941         mutex_unlock(&chip->mutex);
1942         return ret;
1943 }
1944
1945 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1946 {
1947         unsigned long ofs, len;
1948         int ret;
1949
1950         ofs = instr->addr;
1951         len = instr->len;
1952
1953         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1954         if (ret)
1955                 return ret;
1956
1957         instr->state = MTD_ERASE_DONE;
1958         mtd_erase_callback(instr);
1959
1960         return 0;
1961 }
1962
1963 static void cfi_intelext_sync (struct mtd_info *mtd)
1964 {
1965         struct map_info *map = mtd->priv;
1966         struct cfi_private *cfi = map->fldrv_priv;
1967         int i;
1968         struct flchip *chip;
1969         int ret = 0;
1970
1971         for (i=0; !ret && i<cfi->numchips; i++) {
1972                 chip = &cfi->chips[i];
1973
1974                 mutex_lock(&chip->mutex);
1975                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1976
1977                 if (!ret) {
1978                         chip->oldstate = chip->state;
1979                         chip->state = FL_SYNCING;
1980                         /* No need to wake_up() on this state change -
1981                          * as the whole point is that nobody can do anything
1982                          * with the chip now anyway.
1983                          */
1984                 }
1985                 mutex_unlock(&chip->mutex);
1986         }
1987
1988         /* Unlock the chips again */
1989
1990         for (i--; i >= 0; i--) {
1991                 chip = &cfi->chips[i];
1992
1993                 mutex_lock(&chip->mutex);
1994
1995                 if (chip->state == FL_SYNCING) {
1996                         chip->state = chip->oldstate;
1997                         chip->oldstate = FL_READY;
1998                         wake_up(&chip->wq);
1999                 }
2000                 mutex_unlock(&chip->mutex);
2001         }
2002 }
2003
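     /*
      * Read one block's lock status: switch the chip into read-identifier
      * (0x90) mode and query the lock bit, which sits two device words
      * past the block's base address.
      */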
2004 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2005                                                 struct flchip *chip,
2006                                                 unsigned long adr,
2007                                                 int len, void *thunk)
2008 {
2009         struct cfi_private *cfi = map->fldrv_priv;
2010         int status, ofs_factor = cfi->interleave * cfi->device_type;
2011
2012         adr += chip->start;
2013         xip_disable(map, chip, adr+(2*ofs_factor));
2014         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2015         chip->state = FL_JEDEC_QUERY;
2016         status = cfi_read_query(map, adr+(2*ofs_factor));
2017         xip_enable(map, chip, 0);
2018         return status;
2019 }
2020
2021 #ifdef DEBUG_LOCK_BITS
2022 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2023                                                 struct flchip *chip,
2024                                                 unsigned long adr,
2025                                                 int len, void *thunk)
2026 {
2027         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2028                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2029         return 0;
2030 }
2031 #endif
2032
2033 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2034 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2035
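     /*
      * Lock or unlock one block: lock setup (0x60) followed by 0x01 to
      * set the block's lock bit or 0xd0 to clear lock bits, then wait
      * for the operation to finish (no delay is needed when the chip
      * advertises Instant Individual Block Locking).
      */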
2036 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2037                                        unsigned long adr, int len, void *thunk)
2038 {
2039         struct cfi_private *cfi = map->fldrv_priv;
2040         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2041         int mdelay;
2042         int ret;
2043
2044         adr += chip->start;
2045
2046         mutex_lock(&chip->mutex);
2047         ret = get_chip(map, chip, adr, FL_LOCKING);
2048         if (ret) {
2049                 mutex_unlock(&chip->mutex);
2050                 return ret;
2051         }
2052
2053         ENABLE_VPP(map);
2054         xip_disable(map, chip, adr);
2055
2056         map_write(map, CMD(0x60), adr);
2057         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2058                 map_write(map, CMD(0x01), adr);
2059                 chip->state = FL_LOCKING;
2060         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2061                 map_write(map, CMD(0xD0), adr);
2062                 chip->state = FL_UNLOCKING;
2063         } else
2064                 BUG();
2065
2066         /*
2067          * If Instant Individual Block Locking is supported, there is
2068          * no need to delay.
2069          */
2070         /*
2071          * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2072          * let's use a max of 1.5 seconds (1500 ms) as the timeout.
2073          *
2074          * See "Clear Block Lock-Bits Time" on page 40 in
2075          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2076          * from February 2003
2077          */
2078         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2079
2080         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2081         if (ret) {
2082                 map_write(map, CMD(0x70), adr);
2083                 chip->state = FL_STATUS;
2084                 xip_enable(map, chip, adr);
2085                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2086                 goto out;
2087         }
2088
2089         xip_enable(map, chip, adr);
2090  out:   DISABLE_VPP(map);
2091         put_chip(map, chip, adr);
2092         mutex_unlock(&chip->mutex);
2093         return ret;
2094 }
2095
2096 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2097 {
2098         int ret;
2099
2100 #ifdef DEBUG_LOCK_BITS
2101         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2102                __func__, ofs, (unsigned long long)len);
2103         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2104                 ofs, len, NULL);
2105 #endif
2106
2107         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2108                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2109
2110 #ifdef DEBUG_LOCK_BITS
2111         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2112                __func__, ret);
2113         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2114                 ofs, len, NULL);
2115 #endif
2116
2117         return ret;
2118 }
2119
2120 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2121 {
2122         int ret;
2123
2124 #ifdef DEBUG_LOCK_BITS
2125         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2126                __func__, ofs, (unsigned long long)len);
2127         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2128                 ofs, len, NULL);
2129 #endif
2130
2131         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2132                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2133
2134 #ifdef DEBUG_LOCK_BITS
2135         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2136                __func__, ret);
2137         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2138                 ofs, len, NULL);
2139 #endif
2140
2141         return ret;
2142 }
2143
2144 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2145                                   uint64_t len)
2146 {
2147         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2148                                 ofs, len, NULL) ? 1 : 0;
2149 }
2150
2151 #ifdef CONFIG_MTD_OTP
2152
2153 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2154                         u_long data_offset, u_char *buf, u_int size,
2155                         u_long prot_offset, u_int groupno, u_int groupsize);
2156
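     /*
      * Read from the protection registers, which are only visible while
      * the chip is in read-identifier (0x90) mode.  The cached range is
      * invalidated both before and after the copy so we neither read
      * stale array data nor leave OTP data behind in the cache.
      */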
2157 static int __xipram
2158 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2159             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2160 {
2161         struct cfi_private *cfi = map->fldrv_priv;
2162         int ret;
2163
2164         mutex_lock(&chip->mutex);
2165         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2166         if (ret) {
2167                 mutex_unlock(&chip->mutex);
2168                 return ret;
2169         }
2170
2171         /* let's ensure we're not reading back cached data from array mode */
2172         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2173
2174         xip_disable(map, chip, chip->start);
2175         if (chip->state != FL_JEDEC_QUERY) {
2176                 map_write(map, CMD(0x90), chip->start);
2177                 chip->state = FL_JEDEC_QUERY;
2178         }
2179         map_copy_from(map, buf, chip->start + offset, size);
2180         xip_enable(map, chip, chip->start);
2181
2182         /* then ensure we don't keep OTP data in the cache */
2183         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2184
2185         put_chip(map, chip, chip->start);
2186         mutex_unlock(&chip->mutex);
2187         return 0;
2188 }
2189
2190 static int
2191 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2192              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2193 {
2194         int ret;
2195
2196         while (size) {
2197                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2198                 int gap = offset - bus_ofs;
2199                 int n = min_t(int, size, map_bankwidth(map)-gap);
2200                 map_word datum = map_word_ff(map);
2201
2202                 datum = map_word_load_partial(map, datum, buf, gap, n);
2203                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2204                 if (ret)
2205                         return ret;
2206
2207                 offset += n;
2208                 buf += n;
2209                 size -= n;
2210         }
2211
2212         return 0;
2213 }
2214
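     /*
      * Lock an OTP protection group by clearing its bit in the
      * protection lock register.  The lock register is itself one-time
      * programmable, so this is irreversible.
      */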
2215 static int
2216 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2217             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2218 {
2219         struct cfi_private *cfi = map->fldrv_priv;
2220         map_word datum;
2221
2222         /* make sure area matches group boundaries */
2223         if (size != grpsz)
2224                 return -EXDEV;
2225
2226         datum = map_word_ff(map);
2227         datum = map_word_clr(map, datum, CMD(1 << grpno));
2228         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2229 }
2230
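     /*
      * Walk the protection register regions described by the Intel
      * extended query table and apply 'action' (read, write or lock) to
      * the factory or user segments.  When 'action' is NULL, 'buf' is
      * filled with otp_info records describing each group instead.
      */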
2231 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2232                                  size_t *retlen, u_char *buf,
2233                                  otp_op_t action, int user_regs)
2234 {
2235         struct map_info *map = mtd->priv;
2236         struct cfi_private *cfi = map->fldrv_priv;
2237         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2238         struct flchip *chip;
2239         struct cfi_intelext_otpinfo *otp;
2240         u_long devsize, reg_prot_offset, data_offset;
2241         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2242         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2243         int ret;
2244
2245         *retlen = 0;
2246
2247         /* Check that we actually have some OTP registers */
2248         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2249                 return -ENODATA;
2250
2251         /* we need real chips here, not virtual ones */
2252         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2253         chip_step = devsize >> cfi->chipshift;
2254         chip_num = 0;
2255
2256         /* Some chips have OTP located in the _top_ partition only.
2257            For example: Intel 28F256L18T (T means top-parameter device) */
2258         if (cfi->mfr == CFI_MFR_INTEL) {
2259                 switch (cfi->id) {
2260                 case 0x880b:
2261                 case 0x880c:
2262                 case 0x880d:
2263                         chip_num = chip_step - 1;
2264                 }
2265         }
2266
2267         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2268                 chip = &cfi->chips[chip_num];
2269                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2270
2271                 /* first OTP region */
2272                 field = 0;
2273                 reg_prot_offset = extp->ProtRegAddr;
2274                 reg_fact_groups = 1;
2275                 reg_fact_size = 1 << extp->FactProtRegSize;
2276                 reg_user_groups = 1;
2277                 reg_user_size = 1 << extp->UserProtRegSize;
2278
2279                 while (len > 0) {
2280                         /* flash geometry fixup */
2281                         data_offset = reg_prot_offset + 1;
2282                         data_offset *= cfi->interleave * cfi->device_type;
2283                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2284                         reg_fact_size *= cfi->interleave;
2285                         reg_user_size *= cfi->interleave;
2286
2287                         if (user_regs) {
2288                                 groups = reg_user_groups;
2289                                 groupsize = reg_user_size;
2290                                 /* skip over factory reg area */
2291                                 groupno = reg_fact_groups;
2292                                 data_offset += reg_fact_groups * reg_fact_size;
2293                         } else {
2294                                 groups = reg_fact_groups;
2295                                 groupsize = reg_fact_size;
2296                                 groupno = 0;
2297                         }
2298
2299                         while (len > 0 && groups > 0) {
2300                                 if (!action) {
2301                                         /*
2302                                          * Special case: if action is NULL
2303                                          * we fill buf with otp_info records.
2304                                          */
2305                                         struct otp_info *otpinfo;
2306                                         map_word lockword;
2307                                         if (len <= sizeof(struct otp_info))
2308                                                 return -ENOSPC;
2309                                         len -= sizeof(struct otp_info);
2310                                         ret = do_otp_read(map, chip,
2311                                                           reg_prot_offset,
2312                                                           (u_char *)&lockword,
2313                                                           map_bankwidth(map),
2314                                                           0, 0,  0);
2315                                         if (ret)
2316                                                 return ret;
2317                                         otpinfo = (struct otp_info *)buf;
2318                                         otpinfo->start = from;
2319                                         otpinfo->length = groupsize;
2320                                         otpinfo->locked =
2321                                            !map_word_bitsset(map, lockword,
2322                                                              CMD(1 << groupno));
2323                                         from += groupsize;
2324                                         buf += sizeof(*otpinfo);
2325                                         *retlen += sizeof(*otpinfo);
2326                                 } else if (from >= groupsize) {
2327                                         from -= groupsize;
2328                                         data_offset += groupsize;
2329                                 } else {
2330                                         int size = groupsize;
2331                                         data_offset += from;
2332                                         size -= from;
2333                                         from = 0;
2334                                         if (size > len)
2335                                                 size = len;
2336                                         ret = action(map, chip, data_offset,
2337                                                      buf, size, reg_prot_offset,
2338                                                      groupno, groupsize);
2339                                         if (ret < 0)
2340                                                 return ret;
2341                                         buf += size;
2342                                         len -= size;
2343                                         *retlen += size;
2344                                         data_offset += size;
2345                                 }
2346                                 groupno++;
2347                                 groups--;
2348                         }
2349
2350                         /* next OTP region */
2351                         if (++field == extp->NumProtectionFields)
2352                                 break;
2353                         reg_prot_offset = otp->ProtRegAddr;
2354                         reg_fact_groups = otp->FactGroups;
2355                         reg_fact_size = 1 << otp->FactProtRegSize;
2356                         reg_user_groups = otp->UserGroups;
2357                         reg_user_size = 1 << otp->UserProtRegSize;
2358                         otp++;
2359                 }
2360         }
2361
2362         return 0;
2363 }
2364
2365 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2366                                            size_t len, size_t *retlen,
2367                                            u_char *buf)
2368 {
2369         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2370                                      buf, do_otp_read, 0);
2371 }
2372
2373 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2374                                            size_t len, size_t *retlen,
2375                                            u_char *buf)
2376 {
2377         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2378                                      buf, do_otp_read, 1);
2379 }
2380
2381 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2382                                             size_t len, size_t *retlen,
2383                                            u_char *buf)
2384 {
2385         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2386                                      buf, do_otp_write, 1);
2387 }
2388
2389 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2390                                            loff_t from, size_t len)
2391 {
2392         size_t retlen;
2393         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2394                                      NULL, do_otp_lock, 1);
2395 }
2396
2397 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2398                                            size_t *retlen, struct otp_info *buf)
2400 {
2401         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2402                                      NULL, 0);
2403 }
2404
2405 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2406                                            size_t *retlen, struct otp_info *buf)
2407 {
2408         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2409                                      NULL, 1);
2410 }
2411
2412 #endif
2413
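     /*
      * Chips flagged MTD_POWERUP_LOCK power up with every block locked.
      * Record the current per-block lock state in each region's lockmap
      * before suspending so cfi_intelext_restore_locks() can re-unlock
      * the blocks that were unlocked.
      */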
2414 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2415 {
2416         struct mtd_erase_region_info *region;
2417         int block, status, i;
2418         unsigned long adr;
2419         size_t len;
2420
2421         for (i = 0; i < mtd->numeraseregions; i++) {
2422                 region = &mtd->eraseregions[i];
2423                 if (!region->lockmap)
2424                         continue;
2425
2426                 for (block = 0; block < region->numblocks; block++){
2427                         len = region->erasesize;
2428                         adr = region->offset + block * len;
2429
2430                         status = cfi_varsize_frob(mtd,
2431                                         do_getlockstatus_oneblock, adr, len, NULL);
2432                         if (status)
2433                                 set_bit(block, region->lockmap);
2434                         else
2435                                 clear_bit(block, region->lockmap);
2436                 }
2437         }
2438 }
2439
2440 static int cfi_intelext_suspend(struct mtd_info *mtd)
2441 {
2442         struct map_info *map = mtd->priv;
2443         struct cfi_private *cfi = map->fldrv_priv;
2444         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2445         int i;
2446         struct flchip *chip;
2447         int ret = 0;
2448
2449         if ((mtd->flags & MTD_POWERUP_LOCK)
2450             && extp && (extp->FeatureSupport & (1 << 5)))
2451                 cfi_intelext_save_locks(mtd);
2452
2453         for (i=0; !ret && i<cfi->numchips; i++) {
2454                 chip = &cfi->chips[i];
2455
2456                 mutex_lock(&chip->mutex);
2457
2458                 switch (chip->state) {
2459                 case FL_READY:
2460                 case FL_STATUS:
2461                 case FL_CFI_QUERY:
2462                 case FL_JEDEC_QUERY:
2463                         if (chip->oldstate == FL_READY) {
2464                                 /* place the chip in a known state before suspend */
2465                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2466                                 chip->oldstate = chip->state;
2467                                 chip->state = FL_PM_SUSPENDED;
2468                                 /* No need to wake_up() on this state change -
2469                                  * as the whole point is that nobody can do anything
2470                                  * with the chip now anyway.
2471                                  */
2472                         } else {
2473                                 /* There seems to be an operation pending. We must wait for it. */
2474                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2475                                 ret = -EAGAIN;
2476                         }
2477                         break;
2478                 default:
2479                         /* Should we actually wait? Once upon a time these routines weren't
2480                            allowed to. Or should we return -EAGAIN, because the upper layers
2481                            ought to have already shut down anything which was using the device
2482                            anyway? The latter for now. */
2483                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2484                         ret = -EAGAIN;
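                             /* fall through */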
2485                 case FL_PM_SUSPENDED:
2486                         break;
2487                 }
2488                 mutex_unlock(&chip->mutex);
2489         }
2490
2491         /* Unlock the chips again */
2492
2493         if (ret) {
2494                 for (i--; i >= 0; i--) {
2495                         chip = &cfi->chips[i];
2496
2497                         mutex_lock(&chip->mutex);
2498
2499                         if (chip->state == FL_PM_SUSPENDED) {
2500                                 /* No need to force it into a known state here,
2501                                    because we're returning failure, and it didn't
2502                                    get power cycled */
2503                                 chip->state = chip->oldstate;
2504                                 chip->oldstate = FL_READY;
2505                                 wake_up(&chip->wq);
2506                         }
2507                         mutex_unlock(&chip->mutex);
2508                 }
2509         }
2510
2511         return ret;
2512 }
2513
2514 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2515 {
2516         struct mtd_erase_region_info *region;
2517         int block, i;
2518         unsigned long adr;
2519         size_t len;
2520
2521         for (i = 0; i < mtd->numeraseregions; i++) {
2522                 region = &mtd->eraseregions[i];
2523                 if (!region->lockmap)
2524                         continue;
2525
2526                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2527                         len = region->erasesize;
2528                         adr = region->offset + block * len;
2529                         cfi_intelext_unlock(mtd, adr, len);
2530                 }
2531         }
2532 }
2533
2534 static void cfi_intelext_resume(struct mtd_info *mtd)
2535 {
2536         struct map_info *map = mtd->priv;
2537         struct cfi_private *cfi = map->fldrv_priv;
2538         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2539         int i;
2540         struct flchip *chip;
2541
2542         for (i=0; i<cfi->numchips; i++) {
2543
2544                 chip = &cfi->chips[i];
2545
2546                 mutex_lock(&chip->mutex);
2547
2548                 /* Go to known state. Chip may have been power cycled */
2549                 if (chip->state == FL_PM_SUSPENDED) {
2550                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2551                         chip->oldstate = chip->state = FL_READY;
2552                         wake_up(&chip->wq);
2553                 }
2554
2555                 mutex_unlock(&chip->mutex);
2556         }
2557
2558         if ((mtd->flags & MTD_POWERUP_LOCK)
2559             && extp && (extp->FeatureSupport & (1 << 5)))
2560                 cfi_intelext_restore_locks(mtd);
2561 }
2562
2563 static int cfi_intelext_reset(struct mtd_info *mtd)
2564 {
2565         struct map_info *map = mtd->priv;
2566         struct cfi_private *cfi = map->fldrv_priv;
2567         int i, ret;
2568
2569         for (i=0; i < cfi->numchips; i++) {
2570                 struct flchip *chip = &cfi->chips[i];
2571
2572                 /* force the completion of any ongoing operation
2573                    and switch to array mode so any bootloader in
2574                    flash is accessible for soft reboot. */
2575                 mutex_lock(&chip->mutex);
2576                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2577                 if (!ret) {
2578                         map_write(map, CMD(0xff), chip->start);
2579                         chip->state = FL_SHUTDOWN;
2580                         put_chip(map, chip, chip->start);
2581                 }
2582                 mutex_unlock(&chip->mutex);
2583         }
2584
2585         return 0;
2586 }
2587
2588 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2589                                void *v)
2590 {
2591         struct mtd_info *mtd;
2592
2593         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2594         cfi_intelext_reset(mtd);
2595         return NOTIFY_DONE;
2596 }
2597
2598 static void cfi_intelext_destroy(struct mtd_info *mtd)
2599 {
2600         struct map_info *map = mtd->priv;
2601         struct cfi_private *cfi = map->fldrv_priv;
2602         struct mtd_erase_region_info *region;
2603         int i;
2604         cfi_intelext_reset(mtd);
2605         unregister_reboot_notifier(&mtd->reboot_notifier);
2606         kfree(cfi->cmdset_priv);
2607         kfree(cfi->cfiq);
2608         kfree(cfi->chips[0].priv);
2609         kfree(cfi);
2610         for (i = 0; i < mtd->numeraseregions; i++) {
2611                 region = &mtd->eraseregions[i];
2612                 kfree(region->lockmap);        /* kfree(NULL) is a no-op */
2613         }
2615         kfree(mtd->eraseregions);
2616 }
2617
2618 MODULE_LICENSE("GPL");
2619 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2620 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2621 MODULE_ALIAS("cfi_cmdset_0003");
2622 MODULE_ALIAS("cfi_cmdset_0200");