/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set one single partition thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};
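
/*
 * For illustration only (not part of this driver): the entries above are
 * applied by cfi_fixup() from the generic CFI code.  A minimal sketch of
 * that matching loop, assuming the usual CFI_MFR_ANY/CFI_ID_ANY wildcard
 * semantics, would look roughly like this:
 */
#if 0
static void example_cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_fixup *f;

        /* Run every entry whose manufacturer and device IDs match the
         * probed chip; wildcard entries match anything. */
        for (f = fixups; f->fixup; f++) {
                if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                    (f->id == CFI_ID_ANY || f->id == cfi->id))
                        f->fixup(mtd);
        }
}
#endif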

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * common as well.  This table covers all the cases where
         * we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
         * Erase Suspend for their small Erase Blocks (0x8000)
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
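
/*
 * Note the read-parse-grow pattern above: the extended query table is
 * variable length, so it is first read at sizeof(*extp), and whenever the
 * walk discovers more trailing data than fits, the buffer is freed and
 * re-read at the larger size (capped at 4096 bytes).  As a hypothetical
 * example, a v1.3 table with one protection field and a single partition
 * region containing one block type needs
 *      extra_size = 0                  (OTP info)
 *                 + 2 + extp->extra[1] (Burst Read info)
 *                 + 1                  (number of partition regions)
 *                 + sizeof(struct cfi_intelext_regioninfo)
 * bytes beyond sizeof(*extp), so one or more re-reads may occur before
 * the whole table fits.
 */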

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
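
/*
 * For illustration only: a board map driver normally reaches
 * cfi_cmdset_0001() indirectly, by running the generic CFI probe on its
 * map and letting the probe dispatch on the primary vendor command set
 * (0x0001, 0x0003 or 0x0200 all land here via the aliases above).  A
 * minimal, hypothetical sketch of that calling convention:
 */
#if 0
#include <linux/io.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

static struct map_info example_map = {  /* hypothetical board mapping */
        .name           = "example-nor",
        .size           = 0x1000000,
        .bankwidth      = 2,
        .phys           = 0x80000000,
};

static int __init example_nor_init(void)
{
        struct mtd_info *mtd;

        example_map.virt = ioremap(example_map.phys, example_map.size);
        if (!example_map.virt)
                return -EIO;
        simple_map_init(&example_map);

        /* cfi_probe identifies the chip and ends up calling
         * cfi_cmdset_0001() above for Intel/Sharp command sets. */
        mtd = do_map_probe("cfi_probe", &example_map);
        if (!mtd)
                return -ENXIO;

        return mtd_device_register(mtd, NULL, 0);
}
#endif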

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
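
                /*
                 * Worked example (hypothetical numbers): a 64 MiB chip
                 * (chipshift = 26) reporting 4 identical partitions gives
                 * partshift = 26 - __ffs(4) = 24, i.e. each virtual chip
                 * below covers 16 MiB of the device.
                 */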

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
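
/*
 * Summary of the Intel/Sharp command and status values used below, as they
 * appear in this file (consult the chip datasheet for authority):
 *   0x70  Read Status Register
 *   0xff  Read Array (return to normal read mode)
 *   0xb0  Erase/Program Suspend
 *   0xd0  Resume (also serves as Confirm for erase operations)
 * Status bits polled via the map_word helpers:
 *   0x80  Write State Machine ready
 *   0x40  erase suspended
 *   0x04  program suspended
 *   0x01  partition write status (multi-partition chips)
 */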
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if reading/writing the block being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend small EBs on buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations, which are global to the real chip and not per
                 * partition.  So let's fight it out in the partition which
                 * currently has authority over the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
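
/*
 * For illustration only: the operation paths in this driver bracket flash
 * access with get_chip()/put_chip() under chip->mutex.  A condensed,
 * hypothetical sketch of that pattern (error handling trimmed):
 */
#if 0
static int example_do_operation(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        /* ... issue flash commands and poll status here ... */

        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
        return 0;
}
#endif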

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
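
/*
 * For illustration only: a hypothetical sketch of how the XIP bracket is
 * meant to be used around command-mode sequences in this driver (the real
 * write/erase paths follow this shape):
 */
#if 0
static int __xipram example_xip_command(struct map_info *map,
                                        struct flchip *chip,
                                        unsigned long adr)
{
        xip_disable(map, chip, adr);    /* IRQs off, flash may leave array mode */

        map_write(map, CMD(0x70), adr); /* only __xipram code from here on */
        /* ... issue the operation and wait via xip_wait_for_operation() ... */

        xip_enable(map, chip, adr);     /* back to array mode, IRQs on */
        return 0;
}
#endif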

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
1132
1133 static int __xipram xip_wait_for_operation(
1134                 struct map_info *map, struct flchip *chip,
1135                 unsigned long adr, unsigned int chip_op_time_max)
1136 {
1137         struct cfi_private *cfi = map->fldrv_priv;
1138         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1139         map_word status, OK = CMD(0x80);
1140         unsigned long usec, suspended, start, done;
1141         flstate_t oldstate, newstate;
1142
1143         start = xip_currtime();
1144         usec = chip_op_time_max;
1145         if (usec == 0)
1146                 usec = 500000;
1147         done = 0;
1148
1149         do {
1150                 cpu_relax();
1151                 if (xip_irqpending() && cfip &&
1152                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1153                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1154                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1155                         /*
1156                          * Let's suspend the erase or write operation when
1157                          * supported.  Note that we currently don't try to
1158                          * suspend interleaved chips if there is already
1159                          * another operation suspended (imagine what happens
1160                          * when one chip was already done with the current
1161                          * operation while another chip suspended it, then
1162                          * we resume the whole thing at once).  Yes, it
1163                          * can happen!
1164                          */
1165                         usec -= done;
1166                         map_write(map, CMD(0xb0), adr);
1167                         map_write(map, CMD(0x70), adr);
1168                         suspended = xip_currtime();
1169                         do {
1170                                 if (xip_elapsed_since(suspended) > 100000) {
1171                                         /*
1172                                          * The chip doesn't want to suspend
1173                                          * after waiting for 100 msecs.
1174                                          * This is a critical error but there
1175                                          * is not much we can do here.
1176                                          */
1177                                         return -EIO;
1178                                 }
1179                                 status = map_read(map, adr);
1180                         } while (!map_word_andequal(map, status, OK, OK));
1181
1182                         /* Suspend succeeded */
1183                         oldstate = chip->state;
1184                         if (oldstate == FL_ERASING) {
1185                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1186                                         break;
1187                                 newstate = FL_XIP_WHILE_ERASING;
1188                                 chip->erase_suspended = 1;
1189                         } else {
1190                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1191                                         break;
1192                                 newstate = FL_XIP_WHILE_WRITING;
1193                                 chip->write_suspended = 1;
1194                         }
1195                         chip->state = newstate;
1196                         map_write(map, CMD(0xff), adr);
1197                         (void) map_read(map, adr);
1198                         xip_iprefetch();
1199                         local_irq_enable();
1200                         mutex_unlock(&chip->mutex);
1201                         xip_iprefetch();
1202                         cond_resched();
1203
1204                         /*
1205                          * We're back.  However someone else might have
1206                          * decided to go write to the chip if we are in
1207                          * a suspended erase state.  If so let's wait
1208                          * until it's done.
1209                          */
1210                         mutex_lock(&chip->mutex);
1211                         while (chip->state != newstate) {
1212                                 DECLARE_WAITQUEUE(wait, current);
1213                                 set_current_state(TASK_UNINTERRUPTIBLE);
1214                                 add_wait_queue(&chip->wq, &wait);
1215                                 mutex_unlock(&chip->mutex);
1216                                 schedule();
1217                                 remove_wait_queue(&chip->wq, &wait);
1218                                 mutex_lock(&chip->mutex);
1219                         }
1220                         /* Disallow XIP again */
1221                         local_irq_disable();
1222
1223                         /* Resume the write or erase operation */
1224                         map_write(map, CMD(0xd0), adr);
1225                         map_write(map, CMD(0x70), adr);
1226                         chip->state = oldstate;
1227                         start = xip_currtime();
1228                 } else if (usec >= 1000000/HZ) {
1229                         /*
1230                          * Try to save on CPU power when waiting delay
1231                          * is at least a system timer tick period.
1232                          * No need to be extremely accurate here.
1233                          */
1234                         xip_cpu_idle();
1235                 }
1236                 status = map_read(map, adr);
1237                 done = xip_elapsed_since(start);
1238         } while (!map_word_andequal(map, status, OK, OK)
1239                  && done < usec);
1240
1241         return (done >= usec) ? -ETIME : 0;
1242 }
1243
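/*
 * Illustration of the suspend bookkeeping above (made-up numbers,
 * not from any datasheet): if the caller passed usec = 2000 and the
 * interrupt arrives with done = 500, the budget drops to 1500 before
 * the 0xb0 suspend command is issued, and "start" is reset on resume,
 * so only time actually spent waiting on the chip counts against the
 * caller's timeout.  The post-suspend checks read SR.6 (CMD(0x40),
 * erase suspended) and SR.2 (CMD(0x04), program suspended): if the
 * relevant bit is clear, the operation completed on its own and the
 * code breaks out instead of entering the suspended state.
 */
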
1244 /*
1245  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1246  * the flash is actively programming or erasing since we have to poll for
1247  * the operation to complete anyway.  We can't do that in a generic way with
1248  * a XIP setup so do it before the actual flash operation in this case
1249  * and stub it out from INVAL_CACHE_AND_WAIT.
1250  */
1251 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1252         INVALIDATE_CACHED_RANGE(map, from, size)
1253
1254 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1255         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1256
1257 #else
1258
1259 #define xip_disable(map, chip, adr)
1260 #define xip_enable(map, chip, adr)
1261 #define XIP_INVAL_CACHED_RANGE(x...)
1262 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1263
1264 static int inval_cache_and_wait_for_operation(
1265                 struct map_info *map, struct flchip *chip,
1266                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1267                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1268 {
1269         struct cfi_private *cfi = map->fldrv_priv;
1270         map_word status, status_OK = CMD(0x80);
1271         int chip_state = chip->state;
1272         unsigned int timeo, sleep_time, reset_timeo;
1273
1274         mutex_unlock(&chip->mutex);
1275         if (inval_len)
1276                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1277         mutex_lock(&chip->mutex);
1278
1279         timeo = chip_op_time_max;
1280         if (!timeo)
1281                 timeo = 500000;
1282         reset_timeo = timeo;
1283         sleep_time = chip_op_time / 2;
1284
1285         for (;;) {
1286                 if (chip->state != chip_state) {
1287                         /* Someone's suspended the operation: sleep */
1288                         DECLARE_WAITQUEUE(wait, current);
1289                         set_current_state(TASK_UNINTERRUPTIBLE);
1290                         add_wait_queue(&chip->wq, &wait);
1291                         mutex_unlock(&chip->mutex);
1292                         schedule();
1293                         remove_wait_queue(&chip->wq, &wait);
1294                         mutex_lock(&chip->mutex);
1295                         continue;
1296                 }
1297
1298                 status = map_read(map, cmd_adr);
1299                 if (map_word_andequal(map, status, status_OK, status_OK))
1300                         break;
1301
1302                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1303                         /* Erase suspend occurred while sleeping: reset timeout */
1304                         timeo = reset_timeo;
1305                         chip->erase_suspended = 0;
1306                 }
1307                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1308                         /* Write suspend occurred while sleeping: reset timeout */
1309                         timeo = reset_timeo;
1310                         chip->write_suspended = 0;
1311                 }
1312                 if (!timeo) {
1313                         map_write(map, CMD(0x70), cmd_adr);
1314                         chip->state = FL_STATUS;
1315                         return -ETIME;
1316                 }
1317
1318                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1319                 mutex_unlock(&chip->mutex);
1320                 if (sleep_time >= 1000000/HZ) {
1321                         /*
1322                          * The remaining half of the nominal delay is at
1323                          * least one timer tick long, so it can be spent
1324                          * in a sleeping delay instead of busy-waiting.
1325                          */
1326                         msleep(sleep_time/1000);
1327                         timeo -= sleep_time;
1328                         sleep_time = 1000000/HZ;
1329                 } else {
1330                         udelay(1);
1331                         cond_resched();
1332                         timeo--;
1333                 }
1334                 mutex_lock(&chip->mutex);
1335         }
1336
1337         /* Done and happy. */
1338         chip->state = FL_STATUS;
1339         return 0;
1340 }
1341
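/*
 * A worked example of the polling strategy above, assuming HZ = 100
 * (10000 us tick) and a nominal chip_op_time of 2000 us: sleep_time
 * starts at 1000 us, which is below one tick, so the loop takes the
 * udelay(1)/cond_resched() path and burns the timeout budget one
 * microsecond at a time.  With HZ = 1000 (1000 us tick) the same
 * sleep_time reaches the msleep() path instead, trading polling
 * granularity for CPU time.  In both cases the budget is refreshed
 * from reset_timeo when a suspend/resume cycle is detected, since
 * the operation effectively restarts.
 */
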
1342 #endif
1343
1344 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1345         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1346
1347
1348 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1349 {
1350         unsigned long cmd_addr;
1351         struct cfi_private *cfi = map->fldrv_priv;
1352         int ret = 0;
1353
1354         adr += chip->start;
1355
1356         /* Ensure cmd read/writes are aligned. */
1357         cmd_addr = adr & ~(map_bankwidth(map)-1);
1358
1359         mutex_lock(&chip->mutex);
1360
1361         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1362
1363         if (!ret) {
1364                 if (chip->state != FL_POINT && chip->state != FL_READY)
1365                         map_write(map, CMD(0xff), cmd_addr);
1366
1367                 chip->state = FL_POINT;
1368                 chip->ref_point_counter++;
1369         }
1370         mutex_unlock(&chip->mutex);
1371
1372         return ret;
1373 }
1374
1375 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1376                 size_t *retlen, void **virt, resource_size_t *phys)
1377 {
1378         struct map_info *map = mtd->priv;
1379         struct cfi_private *cfi = map->fldrv_priv;
1380         unsigned long ofs, last_end = 0;
1381         int chipnum;
1382         int ret = 0;
1383
1384         if (!map->virt)
1385                 return -EINVAL;
1386
1387         /* Now lock the chip(s) to POINT state */
1388
1389         /* ofs: offset within the first chip at which the first read should start */
1390         chipnum = (from >> cfi->chipshift);
1391         ofs = from - (chipnum << cfi->chipshift);
1392
1393         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1394         if (phys)
1395                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1396
1397         while (len) {
1398                 unsigned long thislen;
1399
1400                 if (chipnum >= cfi->numchips)
1401                         break;
1402
1403                 /* We cannot point across chips that are virtually disjoint */
1404                 if (!last_end)
1405                         last_end = cfi->chips[chipnum].start;
1406                 else if (cfi->chips[chipnum].start != last_end)
1407                         break;
1408
1409                 if ((len + ofs - 1) >> cfi->chipshift)
1410                         thislen = (1<<cfi->chipshift) - ofs;
1411                 else
1412                         thislen = len;
1413
1414                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1415                 if (ret)
1416                         break;
1417
1418                 *retlen += thislen;
1419                 len -= thislen;
1420
1421                 ofs = 0;
1422                 last_end += 1 << cfi->chipshift;
1423                 chipnum++;
1424         }
1425         return 0;
1426 }
1427
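/*
 * Example of the offset decomposition above, assuming two 16 MiB
 * chips (cfi->chipshift == 24): a request at from = 0x1000100 gives
 * chipnum = 1 and ofs = 0x100.  When (len + ofs - 1) >> chipshift is
 * non-zero the request runs past the end of the current chip, so
 * thislen is clamped to (1 << 24) - ofs and the loop carries on with
 * the next chip at ofs = 0.
 */
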
1428 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1429 {
1430         struct map_info *map = mtd->priv;
1431         struct cfi_private *cfi = map->fldrv_priv;
1432         unsigned long ofs;
1433         int chipnum, err = 0;
1434
1435         /* Now unlock the chip(s) from POINT state */
1436
1437         /* ofs: offset within the first chip at which the first read should start */
1438         chipnum = (from >> cfi->chipshift);
1439         ofs = from - (chipnum << cfi->chipshift);
1440
1441         while (len && !err) {
1442                 unsigned long thislen;
1443                 struct flchip *chip;
1444
1445                 if (chipnum >= cfi->numchips)
1446                         break;
1447                 chip = &cfi->chips[chipnum];
1448
1449                 if ((len + ofs - 1) >> cfi->chipshift)
1450                         thislen = (1<<cfi->chipshift) - ofs;
1451                 else
1452                         thislen = len;
1453
1454                 mutex_lock(&chip->mutex);
1455                 if (chip->state == FL_POINT) {
1456                         chip->ref_point_counter--;
1457                         if (chip->ref_point_counter == 0)
1458                                 chip->state = FL_READY;
1459                 } else {
1460                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1461                         err = -EINVAL;
1462                 }
1463
1464                 put_chip(map, chip, chip->start);
1465                 mutex_unlock(&chip->mutex);
1466
1467                 len -= thislen;
1468                 ofs = 0;
1469                 chipnum++;
1470         }
1471
1472         return err;
1473 }
1474
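/*
 * FL_POINT is reference counted: two overlapping point() requests on
 * the same chip leave ref_point_counter at 2, and the chip returns to
 * FL_READY (allowing writes and erases to proceed again) only after
 * both matching unpoint() calls have been made.
 */
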
1475 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1476 {
1477         unsigned long cmd_addr;
1478         struct cfi_private *cfi = map->fldrv_priv;
1479         int ret;
1480
1481         adr += chip->start;
1482
1483         /* Ensure cmd read/writes are aligned. */
1484         cmd_addr = adr & ~(map_bankwidth(map)-1);
1485
1486         mutex_lock(&chip->mutex);
1487         ret = get_chip(map, chip, cmd_addr, FL_READY);
1488         if (ret) {
1489                 mutex_unlock(&chip->mutex);
1490                 return ret;
1491         }
1492
1493         if (chip->state != FL_POINT && chip->state != FL_READY) {
1494                 map_write(map, CMD(0xff), cmd_addr);
1495
1496                 chip->state = FL_READY;
1497         }
1498
1499         map_copy_from(map, buf, adr, len);
1500
1501         put_chip(map, chip, cmd_addr);
1502
1503         mutex_unlock(&chip->mutex);
1504         return 0;
1505 }
1506
1507 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1508 {
1509         struct map_info *map = mtd->priv;
1510         struct cfi_private *cfi = map->fldrv_priv;
1511         unsigned long ofs;
1512         int chipnum;
1513         int ret = 0;
1514
1515         /* ofs: offset within the first chip at which the first read should start */
1516         chipnum = (from >> cfi->chipshift);
1517         ofs = from - (chipnum << cfi->chipshift);
1518
1519         while (len) {
1520                 unsigned long thislen;
1521
1522                 if (chipnum >= cfi->numchips)
1523                         break;
1524
1525                 if ((len + ofs - 1) >> cfi->chipshift)
1526                         thislen = (1<<cfi->chipshift) - ofs;
1527                 else
1528                         thislen = len;
1529
1530                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1531                 if (ret)
1532                         break;
1533
1534                 *retlen += thislen;
1535                 len -= thislen;
1536                 buf += thislen;
1537
1538                 ofs = 0;
1539                 chipnum++;
1540         }
1541         return ret;
1542 }
1543
1544 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1545                                      unsigned long adr, map_word datum, int mode)
1546 {
1547         struct cfi_private *cfi = map->fldrv_priv;
1548         map_word status, write_cmd;
1549         int ret=0;
1550
1551         adr += chip->start;
1552
1553         switch (mode) {
1554         case FL_WRITING:
1555                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1556                 break;
1557         case FL_OTP_WRITE:
1558                 write_cmd = CMD(0xc0);
1559                 break;
1560         default:
1561                 return -EINVAL;
1562         }
1563
1564         mutex_lock(&chip->mutex);
1565         ret = get_chip(map, chip, adr, mode);
1566         if (ret) {
1567                 mutex_unlock(&chip->mutex);
1568                 return ret;
1569         }
1570
1571         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1572         ENABLE_VPP(map);
1573         xip_disable(map, chip, adr);
1574         map_write(map, write_cmd, adr);
1575         map_write(map, datum, adr);
1576         chip->state = mode;
1577
1578         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1579                                    adr, map_bankwidth(map),
1580                                    chip->word_write_time,
1581                                    chip->word_write_time_max);
1582         if (ret) {
1583                 xip_enable(map, chip, adr);
1584                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1585                 goto out;
1586         }
1587
1588         /* check for errors */
1589         status = map_read(map, adr);
1590         if (map_word_bitsset(map, status, CMD(0x1a))) {
1591                 unsigned long chipstatus = MERGESTATUS(status);
1592
1593                 /* reset status */
1594                 map_write(map, CMD(0x50), adr);
1595                 map_write(map, CMD(0x70), adr);
1596                 xip_enable(map, chip, adr);
1597
1598                 if (chipstatus & 0x02) {
1599                         ret = -EROFS;
1600                 } else if (chipstatus & 0x08) {
1601                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1602                         ret = -EIO;
1603                 } else {
1604                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1605                         ret = -EINVAL;
1606                 }
1607
1608                 goto out;
1609         }
1610
1611         xip_enable(map, chip, adr);
1612  out:   DISABLE_VPP(map);
1613         put_chip(map, chip, adr);
1614         mutex_unlock(&chip->mutex);
1615         return ret;
1616 }
1617
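/*
 * The CMD(0x1a) mask above collects the Intel status register error
 * bits relevant to programming: SR.4 (0x10, program error), SR.3
 * (0x08, VPP low) and SR.1 (0x02, block locked).  For example, a word
 * write into a locked block completes the status handshake but comes
 * back with SR.1 set, which the decode above turns into -EROFS.
 */
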
1618
1619 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1620 {
1621         struct map_info *map = mtd->priv;
1622         struct cfi_private *cfi = map->fldrv_priv;
1623         int ret = 0;
1624         int chipnum;
1625         unsigned long ofs;
1626
1627         chipnum = to >> cfi->chipshift;
1628         ofs = to - (chipnum << cfi->chipshift);
1629
1630         /* If it's not bus-aligned, do the first partial-word write */
1631         if (ofs & (map_bankwidth(map)-1)) {
1632                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1633                 int gap = ofs - bus_ofs;
1634                 int n;
1635                 map_word datum;
1636
1637                 n = min_t(int, len, map_bankwidth(map)-gap);
1638                 datum = map_word_ff(map);
1639                 datum = map_word_load_partial(map, datum, buf, gap, n);
1640
1641                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1642                                                bus_ofs, datum, FL_WRITING);
1643                 if (ret)
1644                         return ret;
1645
1646                 len -= n;
1647                 ofs += n;
1648                 buf += n;
1649                 (*retlen) += n;
1650
1651                 if (ofs >> cfi->chipshift) {
1652                         chipnum++;
1653                         ofs = 0;
1654                         if (chipnum == cfi->numchips)
1655                                 return 0;
1656                 }
1657         }
1658
1659         while (len >= map_bankwidth(map)) {
1660                 map_word datum = map_word_load(map, buf);
1661
1662                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1663                                        ofs, datum, FL_WRITING);
1664                 if (ret)
1665                         return ret;
1666
1667                 ofs += map_bankwidth(map);
1668                 buf += map_bankwidth(map);
1669                 (*retlen) += map_bankwidth(map);
1670                 len -= map_bankwidth(map);
1671
1672                 if (ofs >> cfi->chipshift) {
1673                         chipnum++;
1674                         ofs = 0;
1675                         if (chipnum == cfi->numchips)
1676                                 return 0;
1677                 }
1678         }
1679
1680         if (len & (map_bankwidth(map)-1)) {
1681                 map_word datum;
1682
1683                 datum = map_word_ff(map);
1684                 datum = map_word_load_partial(map, datum, buf, 0, len);
1685
1686                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1687                                        ofs, datum, FL_WRITING);
1688                 if (ret)
1689                         return ret;
1690
1691                 (*retlen) += len;
1692         }
1693
1694         return 0;
1695 }
1696
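/*
 * Example of the alignment handling above on a 16-bit bus
 * (map_bankwidth(map) == 2): a 4-byte write to offset 0x1003 first
 * programs a single byte at bus offset 0x1002 (gap = 1), then one
 * full word at 0x1004, then a padded word for the trailing byte at
 * 0x1006.  Padding with 0xff is safe because programming can only
 * clear bits, so the 0xff filler leaves neighbouring bytes untouched.
 */
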
1697
1698 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1699                                     unsigned long adr, const struct kvec **pvec,
1700                                     unsigned long *pvec_seek, int len)
1701 {
1702         struct cfi_private *cfi = map->fldrv_priv;
1703         map_word status, write_cmd, datum;
1704         unsigned long cmd_adr;
1705         int ret, wbufsize, word_gap, words;
1706         const struct kvec *vec;
1707         unsigned long vec_seek;
1708         unsigned long initial_adr;
1709         int initial_len = len;
1710
1711         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1712         adr += chip->start;
1713         initial_adr = adr;
1714         cmd_adr = adr & ~(wbufsize-1);
1715
1716         /* Sharp LH28F640BF chips need the first address for the
1717          * Page Buffer Program command. See Table 5 of
1718          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1719         if (is_LH28F640BF(cfi))
1720                 cmd_adr = adr;
1721
1722         /* Let's determine this according to the interleave only once */
1723         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1724
1725         mutex_lock(&chip->mutex);
1726         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1727         if (ret) {
1728                 mutex_unlock(&chip->mutex);
1729                 return ret;
1730         }
1731
1732         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1733         ENABLE_VPP(map);
1734         xip_disable(map, chip, cmd_adr);
1735
1736         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1737            [...], the device will not accept any more Write to Buffer commands".
1738            So we must check here and reset those bits if they're set. Otherwise
1739            we're just pissing in the wind */
1740         if (chip->state != FL_STATUS) {
1741                 map_write(map, CMD(0x70), cmd_adr);
1742                 chip->state = FL_STATUS;
1743         }
1744         status = map_read(map, cmd_adr);
1745         if (map_word_bitsset(map, status, CMD(0x30))) {
1746                 xip_enable(map, chip, cmd_adr);
1747                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1748                 xip_disable(map, chip, cmd_adr);
1749                 map_write(map, CMD(0x50), cmd_adr);
1750                 map_write(map, CMD(0x70), cmd_adr);
1751         }
1752
1753         chip->state = FL_WRITING_TO_BUFFER;
1754         map_write(map, write_cmd, cmd_adr);
1755         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1756         if (ret) {
1757                 /* Argh. Not ready for write to buffer */
1758                 map_word Xstatus = map_read(map, cmd_adr);
1759                 map_write(map, CMD(0x70), cmd_adr);
1760                 chip->state = FL_STATUS;
1761                 status = map_read(map, cmd_adr);
1762                 map_write(map, CMD(0x50), cmd_adr);
1763                 map_write(map, CMD(0x70), cmd_adr);
1764                 xip_enable(map, chip, cmd_adr);
1765                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1766                                 map->name, Xstatus.x[0], status.x[0]);
1767                 goto out;
1768         }
1769
1770         /* Figure out the number of words to write */
1771         word_gap = (-adr & (map_bankwidth(map)-1));
1772         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1773         if (!word_gap) {
1774                 words--;
1775         } else {
1776                 word_gap = map_bankwidth(map) - word_gap;
1777                 adr -= word_gap;
1778                 datum = map_word_ff(map);
1779         }
1780
1781         /* Write length of data to come */
1782         map_write(map, CMD(words), cmd_adr);
1783
1784         /* Write data */
1785         vec = *pvec;
1786         vec_seek = *pvec_seek;
1787         do {
1788                 int n = map_bankwidth(map) - word_gap;
1789                 if (n > vec->iov_len - vec_seek)
1790                         n = vec->iov_len - vec_seek;
1791                 if (n > len)
1792                         n = len;
1793
1794                 if (!word_gap && len < map_bankwidth(map))
1795                         datum = map_word_ff(map);
1796
1797                 datum = map_word_load_partial(map, datum,
1798                                               vec->iov_base + vec_seek,
1799                                               word_gap, n);
1800
1801                 len -= n;
1802                 word_gap += n;
1803                 if (!len || word_gap == map_bankwidth(map)) {
1804                         map_write(map, datum, adr);
1805                         adr += map_bankwidth(map);
1806                         word_gap = 0;
1807                 }
1808
1809                 vec_seek += n;
1810                 if (vec_seek == vec->iov_len) {
1811                         vec++;
1812                         vec_seek = 0;
1813                 }
1814         } while (len);
1815         *pvec = vec;
1816         *pvec_seek = vec_seek;
1817
1818         /* GO GO GO */
1819         map_write(map, CMD(0xd0), cmd_adr);
1820         chip->state = FL_WRITING;
1821
1822         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1823                                    initial_adr, initial_len,
1824                                    chip->buffer_write_time,
1825                                    chip->buffer_write_time_max);
1826         if (ret) {
1827                 map_write(map, CMD(0x70), cmd_adr);
1828                 chip->state = FL_STATUS;
1829                 xip_enable(map, chip, cmd_adr);
1830                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1831                 goto out;
1832         }
1833
1834         /* check for errors */
1835         status = map_read(map, cmd_adr);
1836         if (map_word_bitsset(map, status, CMD(0x1a))) {
1837                 unsigned long chipstatus = MERGESTATUS(status);
1838
1839                 /* reset status */
1840                 map_write(map, CMD(0x50), cmd_adr);
1841                 map_write(map, CMD(0x70), cmd_adr);
1842                 xip_enable(map, chip, cmd_adr);
1843
1844                 if (chipstatus & 0x02) {
1845                         ret = -EROFS;
1846                 } else if (chipstatus & 0x08) {
1847                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1848                         ret = -EIO;
1849                 } else {
1850                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1851                         ret = -EINVAL;
1852                 }
1853
1854                 goto out;
1855         }
1856
1857         xip_enable(map, chip, cmd_adr);
1858  out:   DISABLE_VPP(map);
1859         put_chip(map, chip, cmd_adr);
1860         mutex_unlock(&chip->mutex);
1861         return ret;
1862 }
1863
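/*
 * A sketch of the buffer-fill arithmetic above, assuming a 32-bit
 * bus (map_bankwidth(map) == 4), adr = 0x2002 and len = 10:
 * word_gap = (-0x2002 & 3) = 2 and words = DIV_ROUND_UP(10 - 2, 4)
 * = 2; word_gap then becomes 4 - 2 = 2 and adr is rounded down to
 * 0x2000 with the leading gap padded from 0xff.  Note that "words"
 * ends up as the word count minus one in both branches, which is the
 * N-1 encoding the Write to Buffer command expects.
 */
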
1864 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1865                                 unsigned long count, loff_t to, size_t *retlen)
1866 {
1867         struct map_info *map = mtd->priv;
1868         struct cfi_private *cfi = map->fldrv_priv;
1869         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1870         int ret = 0;
1871         int chipnum;
1872         unsigned long ofs, vec_seek, i;
1873         size_t len = 0;
1874
1875         for (i = 0; i < count; i++)
1876                 len += vecs[i].iov_len;
1877
1878         if (!len)
1879                 return 0;
1880
1881         chipnum = to >> cfi->chipshift;
1882         ofs = to - (chipnum << cfi->chipshift);
1883         vec_seek = 0;
1884
1885         do {
1886                 /* We must not cross write block boundaries */
1887                 int size = wbufsize - (ofs & (wbufsize-1));
1888
1889                 if (size > len)
1890                         size = len;
1891                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1892                                       ofs, &vecs, &vec_seek, size);
1893                 if (ret)
1894                         return ret;
1895
1896                 ofs += size;
1897                 (*retlen) += size;
1898                 len -= size;
1899
1900                 if (ofs >> cfi->chipshift) {
1901                         chipnum++;
1902                         ofs = 0;
1903                         if (chipnum == cfi->numchips)
1904                                 return 0;
1905                 }
1906
1907                 /* Be nice and reschedule with the chip in a usable state for other
1908                    processes. */
1909                 cond_resched();
1910
1911         } while (len);
1912
1913         return 0;
1914 }
1915
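/*
 * The chunking above keeps every do_write_buffer() call within a
 * single write buffer.  With a 32-byte buffer (wbufsize == 32) and
 * ofs == 0x1c, for instance, the first chunk is limited to
 * 32 - 0x1c = 4 bytes so it ends exactly on the buffer boundary;
 * all following chunks then start aligned and may use the full
 * 32 bytes.
 */
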
1916 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1917                                        size_t len, size_t *retlen, const u_char *buf)
1918 {
1919         struct kvec vec;
1920
1921         vec.iov_base = (void *) buf;
1922         vec.iov_len = len;
1923
1924         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1925 }
1926
1927 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1928                                       unsigned long adr, int len, void *thunk)
1929 {
1930         struct cfi_private *cfi = map->fldrv_priv;
1931         map_word status;
1932         int retries = 3;
1933         int ret;
1934
1935         adr += chip->start;
1936
1937  retry:
1938         mutex_lock(&chip->mutex);
1939         ret = get_chip(map, chip, adr, FL_ERASING);
1940         if (ret) {
1941                 mutex_unlock(&chip->mutex);
1942                 return ret;
1943         }
1944
1945         XIP_INVAL_CACHED_RANGE(map, adr, len);
1946         ENABLE_VPP(map);
1947         xip_disable(map, chip, adr);
1948
1949         /* Clear the status register first */
1950         map_write(map, CMD(0x50), adr);
1951
1952         /* Now erase */
1953         map_write(map, CMD(0x20), adr);
1954         map_write(map, CMD(0xD0), adr);
1955         chip->state = FL_ERASING;
1956         chip->erase_suspended = 0;
1957         chip->in_progress_block_addr = adr;
1958         chip->in_progress_block_mask = ~(len - 1);
1959
1960         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1961                                    adr, len,
1962                                    chip->erase_time,
1963                                    chip->erase_time_max);
1964         if (ret) {
1965                 map_write(map, CMD(0x70), adr);
1966                 chip->state = FL_STATUS;
1967                 xip_enable(map, chip, adr);
1968                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1969                 goto out;
1970         }
1971
1972         /* We've broken this before. It doesn't hurt to be safe */
1973         map_write(map, CMD(0x70), adr);
1974         chip->state = FL_STATUS;
1975         status = map_read(map, adr);
1976
1977         /* check for errors */
1978         if (map_word_bitsset(map, status, CMD(0x3a))) {
1979                 unsigned long chipstatus = MERGESTATUS(status);
1980
1981                 /* Reset the error bits */
1982                 map_write(map, CMD(0x50), adr);
1983                 map_write(map, CMD(0x70), adr);
1984                 xip_enable(map, chip, adr);
1985
1986                 if ((chipstatus & 0x30) == 0x30) {
1987                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1988                         ret = -EINVAL;
1989                 } else if (chipstatus & 0x02) {
1990                         /* Protection bit set */
1991                         ret = -EROFS;
1992                 } else if (chipstatus & 0x8) {
1993                         /* Voltage */
1994                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1995                         ret = -EIO;
1996                 } else if (chipstatus & 0x20 && retries--) {
1997                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1998                         DISABLE_VPP(map);
1999                         put_chip(map, chip, adr);
2000                         mutex_unlock(&chip->mutex);
2001                         goto retry;
2002                 } else {
2003                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2004                         ret = -EIO;
2005                 }
2006
2007                 goto out;
2008         }
2009
2010         xip_enable(map, chip, adr);
2011  out:   DISABLE_VPP(map);
2012         put_chip(map, chip, adr);
2013         mutex_unlock(&chip->mutex);
2014         return ret;
2015 }
2016
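/*
 * The CMD(0x3a) mask above covers the erase-relevant error bits of
 * the Intel status register: SR.5 (0x20, erase error), SR.4 (0x10,
 * program error), SR.3 (0x08, VPP low) and SR.1 (0x02, block locked).
 * SR.4 and SR.5 set together (the 0x30 test) indicate a bad command
 * sequence rather than a failed erase, while SR.5 alone is retried a
 * few times before the block is given up on with -EIO.
 */
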
2017 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2018 {
2019         return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2020                                 instr->len, NULL);
2021 }
2022
2023 static void cfi_intelext_sync (struct mtd_info *mtd)
2024 {
2025         struct map_info *map = mtd->priv;
2026         struct cfi_private *cfi = map->fldrv_priv;
2027         int i;
2028         struct flchip *chip;
2029         int ret = 0;
2030
2031         for (i=0; !ret && i<cfi->numchips; i++) {
2032                 chip = &cfi->chips[i];
2033
2034                 mutex_lock(&chip->mutex);
2035                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2036
2037                 if (!ret) {
2038                         chip->oldstate = chip->state;
2039                         chip->state = FL_SYNCING;
2040                         /* No need to wake_up() on this state change -
2041                          * as the whole point is that nobody can do anything
2042                          * with the chip now anyway.
2043                          */
2044                 }
2045                 mutex_unlock(&chip->mutex);
2046         }
2047
2048         /* Unlock the chips again */
2049
2050         for (i--; i >= 0; i--) {
2051                 chip = &cfi->chips[i];
2052
2053                 mutex_lock(&chip->mutex);
2054
2055                 if (chip->state == FL_SYNCING) {
2056                         chip->state = chip->oldstate;
2057                         chip->oldstate = FL_READY;
2058                         wake_up(&chip->wq);
2059                 }
2060                 mutex_unlock(&chip->mutex);
2061         }
2062 }
2063
2064 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2065                                                 struct flchip *chip,
2066                                                 unsigned long adr,
2067                                                 int len, void *thunk)
2068 {
2069         struct cfi_private *cfi = map->fldrv_priv;
2070         int status, ofs_factor = cfi->interleave * cfi->device_type;
2071
2072         adr += chip->start;
2073         xip_disable(map, chip, adr+(2*ofs_factor));
2074         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2075         chip->state = FL_JEDEC_QUERY;
2076         status = cfi_read_query(map, adr+(2*ofs_factor));
2077         xip_enable(map, chip, 0);
2078         return status;
2079 }
2080
2081 #ifdef DEBUG_LOCK_BITS
2082 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2083                                                 struct flchip *chip,
2084                                                 unsigned long adr,
2085                                                 int len, void *thunk)
2086 {
2087         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2088                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2089         return 0;
2090 }
2091 #endif
2092
2093 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2094 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2095
2096 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2097                                        unsigned long adr, int len, void *thunk)
2098 {
2099         struct cfi_private *cfi = map->fldrv_priv;
2100         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2101         int mdelay;
2102         int ret;
2103
2104         adr += chip->start;
2105
2106         mutex_lock(&chip->mutex);
2107         ret = get_chip(map, chip, adr, FL_LOCKING);
2108         if (ret) {
2109                 mutex_unlock(&chip->mutex);
2110                 return ret;
2111         }
2112
2113         ENABLE_VPP(map);
2114         xip_disable(map, chip, adr);
2115
2116         map_write(map, CMD(0x60), adr);
2117         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2118                 map_write(map, CMD(0x01), adr);
2119                 chip->state = FL_LOCKING;
2120         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2121                 map_write(map, CMD(0xD0), adr);
2122                 chip->state = FL_UNLOCKING;
2123         } else
2124                 BUG();
2125
2126         /*
2127          * If Instant Individual Block Locking is supported then there
2128          * is no need to delay.
2129          */
2130         /*
2131          * Unlocking may take up to 1.4 seconds on some Intel flashes, so
2132          * let's use a max of 1.5 seconds (1500 ms) as timeout.
2133          *
2134          * See "Clear Block Lock-Bits Time" on page 40 in
2135          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2136          * from February 2003
2137          */
2138         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2139
2140         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2141         if (ret) {
2142                 map_write(map, CMD(0x70), adr);
2143                 chip->state = FL_STATUS;
2144                 xip_enable(map, chip, adr);
2145                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2146                 goto out;
2147         }
2148
2149         xip_enable(map, chip, adr);
2150  out:   DISABLE_VPP(map);
2151         put_chip(map, chip, adr);
2152         mutex_unlock(&chip->mutex);
2153         return ret;
2154 }
2155
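/*
 * Note on the timeout above: WAIT_TIMEOUT() takes its nominal delay
 * in microseconds, so mdelay = 1500 yields a 1500 us polling step
 * with an overall cap of mdelay * 1000 = 1.5 s, matching the
 * worst-case "Clear Block Lock-Bits Time" quoted in the comment.
 * Chips that advertise Instant Individual Block Locking (bit 5 of
 * FeatureSupport) skip the wait entirely with mdelay = 0.
 */
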
2156 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2157 {
2158         int ret;
2159
2160 #ifdef DEBUG_LOCK_BITS
2161         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2162                __func__, ofs, len);
2163         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2164                 ofs, len, NULL);
2165 #endif
2166
2167         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2168                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2169
2170 #ifdef DEBUG_LOCK_BITS
2171         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2172                __func__, ret);
2173         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2174                 ofs, len, NULL);
2175 #endif
2176
2177         return ret;
2178 }
2179
2180 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2181 {
2182         int ret;
2183
2184 #ifdef DEBUG_LOCK_BITS
2185         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2186                __func__, ofs, len);
2187         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2188                 ofs, len, NULL);
2189 #endif
2190
2191         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2192                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2193
2194 #ifdef DEBUG_LOCK_BITS
2195         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2196                __func__, ret);
2197         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2198                 ofs, len, NULL);
2199 #endif
2200
2201         return ret;
2202 }
2203
2204 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2205                                   uint64_t len)
2206 {
2207         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2208                                 ofs, len, NULL) ? 1 : 0;
2209 }
2210
2211 #ifdef CONFIG_MTD_OTP
2212
2213 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2214                         u_long data_offset, u_char *buf, u_int size,
2215                         u_long prot_offset, u_int groupno, u_int groupsize);
2216
2217 static int __xipram
2218 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2219             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2220 {
2221         struct cfi_private *cfi = map->fldrv_priv;
2222         int ret;
2223
2224         mutex_lock(&chip->mutex);
2225         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2226         if (ret) {
2227                 mutex_unlock(&chip->mutex);
2228                 return ret;
2229         }
2230
2231         /* let's ensure we're not reading back cached data from array mode */
2232         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2233
2234         xip_disable(map, chip, chip->start);
2235         if (chip->state != FL_JEDEC_QUERY) {
2236                 map_write(map, CMD(0x90), chip->start);
2237                 chip->state = FL_JEDEC_QUERY;
2238         }
2239         map_copy_from(map, buf, chip->start + offset, size);
2240         xip_enable(map, chip, chip->start);
2241
2242         /* then ensure we don't keep OTP data in the cache */
2243         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2244
2245         put_chip(map, chip, chip->start);
2246         mutex_unlock(&chip->mutex);
2247         return 0;
2248 }
2249
2250 static int
2251 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2252              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2253 {
2254         int ret;
2255
2256         while (size) {
2257                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2258                 int gap = offset - bus_ofs;
2259                 int n = min_t(int, size, map_bankwidth(map)-gap);
2260                 map_word datum = map_word_ff(map);
2261
2262                 datum = map_word_load_partial(map, datum, buf, gap, n);
2263                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2264                 if (ret)
2265                         return ret;
2266
2267                 offset += n;
2268                 buf += n;
2269                 size -= n;
2270         }
2271
2272         return 0;
2273 }
2274
2275 static int
2276 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2277             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2278 {
2279         struct cfi_private *cfi = map->fldrv_priv;
2280         map_word datum;
2281
2282         /* make sure area matches group boundaries */
2283         if (size != grpsz)
2284                 return -EXDEV;
2285
2286         datum = map_word_ff(map);
2287         datum = map_word_clr(map, datum, CMD(1 << grpno));
2288         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2289 }
2290
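/*
 * Locking a group works by programming its bit in the protection
 * lock word to zero: for grpno == 3 the value written is all ones
 * except bit 3.  Since programming can only clear bits, groups that
 * are already locked are unaffected, and the operation is one-way.
 */
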
2291 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2292                                  size_t *retlen, u_char *buf,
2293                                  otp_op_t action, int user_regs)
2294 {
2295         struct map_info *map = mtd->priv;
2296         struct cfi_private *cfi = map->fldrv_priv;
2297         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2298         struct flchip *chip;
2299         struct cfi_intelext_otpinfo *otp;
2300         u_long devsize, reg_prot_offset, data_offset;
2301         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2302         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2303         int ret;
2304
2305         *retlen = 0;
2306
2307         /* Check that we actually have some OTP registers */
2308         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2309                 return -ENODATA;
2310
2311         /* we need real chips here, not virtual ones */
2312         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2313         chip_step = devsize >> cfi->chipshift;
2314         chip_num = 0;
2315
2316         /* Some chips have OTP located in the _top_ partition only.
2317            For example: Intel 28F256L18T (T means top-parameter device) */
2318         if (cfi->mfr == CFI_MFR_INTEL) {
2319                 switch (cfi->id) {
2320                 case 0x880b:
2321                 case 0x880c:
2322                 case 0x880d:
2323                         chip_num = chip_step - 1;
2324                 }
2325         }
2326
2327         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2328                 chip = &cfi->chips[chip_num];
2329                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2330
2331                 /* first OTP region */
2332                 field = 0;
2333                 reg_prot_offset = extp->ProtRegAddr;
2334                 reg_fact_groups = 1;
2335                 reg_fact_size = 1 << extp->FactProtRegSize;
2336                 reg_user_groups = 1;
2337                 reg_user_size = 1 << extp->UserProtRegSize;
2338
2339                 while (len > 0) {
2340                         /* flash geometry fixup */
2341                         data_offset = reg_prot_offset + 1;
2342                         data_offset *= cfi->interleave * cfi->device_type;
2343                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2344                         reg_fact_size *= cfi->interleave;
2345                         reg_user_size *= cfi->interleave;
2346
2347                         if (user_regs) {
2348                                 groups = reg_user_groups;
2349                                 groupsize = reg_user_size;
2350                                 /* skip over factory reg area */
2351                                 groupno = reg_fact_groups;
2352                                 data_offset += reg_fact_groups * reg_fact_size;
2353                         } else {
2354                                 groups = reg_fact_groups;
2355                                 groupsize = reg_fact_size;
2356                                 groupno = 0;
2357                         }
2358
2359                         while (len > 0 && groups > 0) {
2360                                 if (!action) {
2361                                         /*
2362                                          * Special case: if action is NULL
2363                                          * we fill buf with otp_info records.
2364                                          */
2365                                         struct otp_info *otpinfo;
2366                                         map_word lockword;
2367                                         if (len <= sizeof(struct otp_info))
2368                                                 return -ENOSPC;
2369                                         len -= sizeof(struct otp_info);
2370                                         ret = do_otp_read(map, chip,
2371                                                           reg_prot_offset,
2372                                                           (u_char *)&lockword,
2373                                                           map_bankwidth(map),
2374                                                           0, 0,  0);
2375                                         if (ret)
2376                                                 return ret;
2377                                         otpinfo = (struct otp_info *)buf;
2378                                         otpinfo->start = from;
2379                                         otpinfo->length = groupsize;
2380                                         otpinfo->locked =
2381                                            !map_word_bitsset(map, lockword,
2382                                                              CMD(1 << groupno));
2383                                         from += groupsize;
2384                                         buf += sizeof(*otpinfo);
2385                                         *retlen += sizeof(*otpinfo);
2386                                 } else if (from >= groupsize) {
2387                                         from -= groupsize;
2388                                         data_offset += groupsize;
2389                                 } else {
2390                                         int size = groupsize;
2391                                         data_offset += from;
2392                                         size -= from;
2393                                         from = 0;
2394                                         if (size > len)
2395                                                 size = len;
2396                                         ret = action(map, chip, data_offset,
2397                                                      buf, size, reg_prot_offset,
2398                                                      groupno, groupsize);
2399                                         if (ret < 0)
2400                                                 return ret;
2401                                         buf += size;
2402                                         len -= size;
2403                                         *retlen += size;
2404                                         data_offset += size;
2405                                 }
2406                                 groupno++;
2407                                 groups--;
2408                         }
2409
2410                         /* next OTP region */
2411                         if (++field == extp->NumProtectionFields)
2412                                 break;
2413                         reg_prot_offset = otp->ProtRegAddr;
2414                         reg_fact_groups = otp->FactGroups;
2415                         reg_fact_size = 1 << otp->FactProtRegSize;
2416                         reg_user_groups = otp->UserGroups;
2417                         reg_user_size = 1 << otp->UserProtRegSize;
2418                         otp++;
2419                 }
2420         }
2421
2422         return 0;
2423 }
2424
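/*
 * Example of the geometry fixup above, assuming two interleaved x16
 * chips (cfi->interleave == 2, cfi->device_type == 2): the CFI-
 * reported protection register address is expressed in device words,
 * so reg_prot_offset and data_offset are scaled by
 * interleave * device_type == 4 to obtain a map byte offset, while
 * the per-region group sizes scale with the interleave factor only.
 */
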
2425 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2426                                            size_t len, size_t *retlen,
2427                                            u_char *buf)
2428 {
2429         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2430                                      buf, do_otp_read, 0);
2431 }
2432
2433 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2434                                            size_t len, size_t *retlen,
2435                                            u_char *buf)
2436 {
2437         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2438                                      buf, do_otp_read, 1);
2439 }
2440
2441 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2442                                             size_t len, size_t *retlen,
2443                                             u_char *buf)
2444 {
2445         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2446                                      buf, do_otp_write, 1);
2447 }
2448
2449 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2450                                            loff_t from, size_t len)
2451 {
2452         size_t retlen;
2453         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2454                                      NULL, do_otp_lock, 1);
2455 }
2456
2457 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2458                                            size_t *retlen, struct otp_info *buf)
2460 {
2461         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2462                                      NULL, 0);
2463 }
2464
2465 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2466                                            size_t *retlen, struct otp_info *buf)
2467 {
2468         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2469                                      NULL, 1);
2470 }
2471
2472 #endif
2473
2474 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2475 {
2476         struct mtd_erase_region_info *region;
2477         int block, status, i;
2478         unsigned long adr;
2479         size_t len;
2480
2481         for (i = 0; i < mtd->numeraseregions; i++) {
2482                 region = &mtd->eraseregions[i];
2483                 if (!region->lockmap)
2484                         continue;
2485
2486                 for (block = 0; block < region->numblocks; block++){
2487                         len = region->erasesize;
2488                         adr = region->offset + block * len;
2489
2490                         status = cfi_varsize_frob(mtd,
2491                                         do_getlockstatus_oneblock, adr, len, NULL);
2492                         if (status)
2493                                 set_bit(block, region->lockmap);
2494                         else
2495                                 clear_bit(block, region->lockmap);
2496                 }
2497         }
2498 }
2499
2500 static int cfi_intelext_suspend(struct mtd_info *mtd)
2501 {
2502         struct map_info *map = mtd->priv;
2503         struct cfi_private *cfi = map->fldrv_priv;
2504         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2505         int i;
2506         struct flchip *chip;
2507         int ret = 0;
2508
2509         if ((mtd->flags & MTD_POWERUP_LOCK)
2510             && extp && (extp->FeatureSupport & (1 << 5)))
2511                 cfi_intelext_save_locks(mtd);
2512
2513         for (i=0; !ret && i<cfi->numchips; i++) {
2514                 chip = &cfi->chips[i];
2515
2516                 mutex_lock(&chip->mutex);
2517
2518                 switch (chip->state) {
2519                 case FL_READY:
2520                 case FL_STATUS:
2521                 case FL_CFI_QUERY:
2522                 case FL_JEDEC_QUERY:
2523                         if (chip->oldstate == FL_READY) {
2524                                 /* place the chip in a known state before suspend */
2525                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2526                                 chip->oldstate = chip->state;
2527                                 chip->state = FL_PM_SUSPENDED;
2528                                 /* No need to wake_up() on this state change -
2529                                  * as the whole point is that nobody can do anything
2530                                  * with the chip now anyway.
2531                                  */
2532                         } else {
2533                                 /* There seems to be an operation pending. We must wait for it. */
2534                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2535                                 ret = -EAGAIN;
2536                         }
2537                         break;
2538                 default:
2539                         /* Should we actually wait? Once upon a time these routines weren't
2540                            allowed to. Or should we return -EAGAIN, because the upper layers
2541                            ought to have already shut down anything which was using the device
2542                            anyway? The latter for now. */
2543                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2544                         ret = -EAGAIN;  /* fall through */
2545                 case FL_PM_SUSPENDED:
2546                         break;
2547                 }
2548                 mutex_unlock(&chip->mutex);
2549         }
2550
2551         /* Unlock the chips again */
2552
2553         if (ret) {
2554                 for (i--; i >= 0; i--) {
2555                         chip = &cfi->chips[i];
2556
2557                         mutex_lock(&chip->mutex);
2558
2559                         if (chip->state == FL_PM_SUSPENDED) {
2560                                 /* No need to force it into a known state here,
2561                                    because we're returning failure, and it didn't
2562                                    get power cycled */
2563                                 chip->state = chip->oldstate;
2564                                 chip->oldstate = FL_READY;
2565                                 wake_up(&chip->wq);
2566                         }
2567                         mutex_unlock(&chip->mutex);
2568                 }
2569         }
2570
2571         return ret;
2572 }
2573
2574 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2575 {
2576         struct mtd_erase_region_info *region;
2577         int block, i;
2578         unsigned long adr;
2579         size_t len;
2580
2581         for (i = 0; i < mtd->numeraseregions; i++) {
2582                 region = &mtd->eraseregions[i];
2583                 if (!region->lockmap)
2584                         continue;
2585
2586                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2587                         len = region->erasesize;
2588                         adr = region->offset + block * len;
2589                         cfi_intelext_unlock(mtd, adr, len);
2590                 }
2591         }
2592 }
2593
2594 static void cfi_intelext_resume(struct mtd_info *mtd)
2595 {
2596         struct map_info *map = mtd->priv;
2597         struct cfi_private *cfi = map->fldrv_priv;
2598         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2599         int i;
2600         struct flchip *chip;
2601
2602         for (i=0; i<cfi->numchips; i++) {
2603
2604                 chip = &cfi->chips[i];
2605
2606                 mutex_lock(&chip->mutex);
2607
2608                 /* Go to known state. Chip may have been power cycled */
2609                 if (chip->state == FL_PM_SUSPENDED) {
2610                         /* Refresh LH28F640BF Partition Config. Register */
2611                         fixup_LH28F640BF(mtd);
2612                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2613                         chip->oldstate = chip->state = FL_READY;
2614                         wake_up(&chip->wq);
2615                 }
2616
2617                 mutex_unlock(&chip->mutex);
2618         }
2619
2620         if ((mtd->flags & MTD_POWERUP_LOCK)
2621             && extp && (extp->FeatureSupport & (1 << 5)))
2622                 cfi_intelext_restore_locks(mtd);
2623 }
2624
2625 static int cfi_intelext_reset(struct mtd_info *mtd)
2626 {
2627         struct map_info *map = mtd->priv;
2628         struct cfi_private *cfi = map->fldrv_priv;
2629         int i, ret;
2630
2631         for (i=0; i < cfi->numchips; i++) {
2632                 struct flchip *chip = &cfi->chips[i];
2633
2634                 /* force the completion of any ongoing operation
2635                    and switch to array mode so any bootloader in
2636                    flash is accessible for soft reboot. */
2637                 mutex_lock(&chip->mutex);
2638                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2639                 if (!ret) {
2640                         map_write(map, CMD(0xff), chip->start);
2641                         chip->state = FL_SHUTDOWN;
2642                         put_chip(map, chip, chip->start);
2643                 }
2644                 mutex_unlock(&chip->mutex);
2645         }
2646
2647         return 0;
2648 }
2649
2650 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2651                                void *v)
2652 {
2653         struct mtd_info *mtd;
2654
2655         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2656         cfi_intelext_reset(mtd);
2657         return NOTIFY_DONE;
2658 }
2659
2660 static void cfi_intelext_destroy(struct mtd_info *mtd)
2661 {
2662         struct map_info *map = mtd->priv;
2663         struct cfi_private *cfi = map->fldrv_priv;
2664         struct mtd_erase_region_info *region;
2665         int i;
2666         cfi_intelext_reset(mtd);
2667         unregister_reboot_notifier(&mtd->reboot_notifier);
2668         kfree(cfi->cmdset_priv);
2669         kfree(cfi->cfiq);
2670         kfree(cfi->chips[0].priv);
2671         kfree(cfi);
2672         for (i = 0; i < mtd->numeraseregions; i++) {
2673                 region = &mtd->eraseregions[i];
2674                 kfree(region->lockmap);
2675         }
2676         kfree(mtd->eraseregions);
2677 }
2678
2679 MODULE_LICENSE("GPL");
2680 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2681 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2682 MODULE_ALIAS("cfi_cmdset_0003");
2683 MODULE_ALIAS("cfi_cmdset_0200");