sys/dev/cfi/cfi_core.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007, Juniper Networks, Inc.
 * Copyright (c) 2012-2013, SRI International
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
 * programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kenv.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>

static void cfi_add_sysctls(struct cfi_softc *);

extern struct cdevsw cfi_cdevsw;

char cfi_driver_name[] = "cfi";

uint32_t
cfi_read_raw(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

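/*
 * cfi_read() is the endian-aware counterpart of cfi_read_raw(): CFI
 * query/status data is little-endian on the bus, so 2- and 4-byte reads
 * are converted with le16toh()/le32toh() unless the platform already
 * swaps bytes in hardware (CFI_HARDWAREBYTESWAP).
 */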
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;
	uint16_t sval;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
#ifdef CFI_HARDWAREBYTESWAP
		val = sval;
#else
		val = le16toh(sval);
#endif
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
#ifndef CFI_HARDWAREBYTESWAP
		val = le32toh(val);
#endif
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

static void
cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
{

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 2:
#ifdef CFI_HARDWAREBYTESWAP
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
#else
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
#endif
		break;
	case 4:
#ifdef CFI_HARDWAREBYTESWAP
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
#else
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
#endif
		break;
	}
}

/*
 * This is the same workaround as in NetBSD's sys/dev/nor/cfi.c
 * cfi_reset_default().
 */
static void
cfi_reset_default(struct cfi_softc *sc)
{

	cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
}

uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;

	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	val = cfi_read(sc, ofs * sc->sc_width);
	cfi_reset_default(sc);
	return (val);
}

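/*
 * Issue a command using the AMD three-cycle sequence: the two unlock
 * writes (CFI_AMD_UNLOCK to the "start" address and CFI_AMD_UNLOCK_ACK
 * to the "ack" address, conventionally 0xAA and 0x55) followed by the
 * command write itself, all relative to ofs.
 */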
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}

static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}

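/*
 * Note that cfi_fmtsize() returns a pointer to a static buffer, so a
 * result is only valid until the next call; e.g. cfi_fmtsize(8 << 20)
 * yields "8MB".  That is fine for the probe/attach messages below,
 * which never hold more than one result at a time.
 */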
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set. */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
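	/*
	 * For example, a QRY interface code of 0 (x8 only) becomes 1 and
	 * matches sc_width 1; code 1 (x16 only) becomes 2 and matches
	 * width 2; code 2 (x8/x16 capable) becomes 3 and matches either.
	 */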
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}

int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;
	uint64_t mtoexp, ttoexp;
#ifdef CFI_SUPPORT_STRATAFLASH
	uint64_t ppr;
	char name[KENV_MNAMELEN], value[32];
#endif

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
#ifndef ATSE_CFI_HACK
	    RF_ACTIVE);
#else
	    RF_ACTIVE | RF_SHAREABLE);
#endif
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/* Get time-out values for erase, write, and buffer write. */
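	/*
	 * The QRY timeout fields are exponents: the typical time is
	 * 2^tto (milliseconds for erase, microseconds for program) and
	 * the maximum is the typical time scaled by a further 2^mto.
	 * E.g. tto = 10, mto = 4 gives a typical erase of ~1 second and
	 * a worst case of ~16 seconds.
	 */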
	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
	if (ttoexp == 0) {
		device_printf(dev, "erase timeout == 0, using 2^16ms\n");
		ttoexp = 16;
	}
	if (ttoexp > 41) {
		device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
		return (EINVAL);
	}
	if (mtoexp == 0) {
		device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
		    ttoexp + 4);
		mtoexp = 4;
	}
	if (ttoexp + mtoexp > 41) {
		device_printf(dev, "insane max erase timeout: 2^%jd\n",
		    ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
	sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);

	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	if (ttoexp == 0) {
		device_printf(dev, "write timeout == 0, using 2^18us\n");
		ttoexp = 18;
	}
	if (ttoexp > 51) {
		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
		return (EINVAL);
	}
	if (mtoexp == 0) {
		device_printf(dev, "max write timeout == 0, using 2^%jdus\n",
		    ttoexp + 4);
		mtoexp = 4;
	}
	if (ttoexp + mtoexp > 51) {
		device_printf(dev, "insane max write timeout: 2^%jdus\n",
		    ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
	sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);

	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
	/* Don't check for 0, it means not-supported. */
	if (ttoexp > 51) {
		device_printf(dev, "insane buffered write timeout: 2^%jdus\n",
		    ttoexp);
		return (EINVAL);
	}
	if (ttoexp + mtoexp > 51) {
		device_printf(dev, "insane max buffered write timeout: "
		    "2^%jdus\n", ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
	    SBT_1US * (1ULL << ttoexp);
	sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
	    (1ULL << mtoexp);

	/* Get the maximum size of a multibyte program */
	if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
		sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
		    cfi_read_qry(sc, CFI_QRY_MAXBUF + 1) << 8);
	else
		sc->sc_maxbuf = 0;

	/* Get erase regions. */
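	/*
	 * Each QRY erase-region descriptor is four bytes: a 16-bit
	 * count of erase blocks minus one, then the block size in
	 * 256-byte units (0 meaning 128 bytes), which is how the loop
	 * below decodes it.
	 */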
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
	    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
		cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_AUTO_SELECT);
		sc->sc_manid = cfi_read(sc, 0);
		sc->sc_devid = cfi_read(sc, 2);
		device_printf(dev, "Manufacturer ID:%x Device ID:%x\n",
		    sc->sc_manid, sc->sc_devid);
		cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
	}

	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	cfi_add_sysctls(sc);

#ifdef CFI_SUPPORT_STRATAFLASH
	/*
	 * Store the Intel factory PPR in the environment.  In some
	 * cases it is the most unique ID on a board.
	 */
	if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
		if (snprintf(name, sizeof(name), "%s.factory_ppr",
		    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
		    snprintf(value, sizeof(value), "0x%016jx", ppr) <
		    (sizeof(value) - 1))
			(void) kern_setenv(name, value);
	}
#endif

	device_add_child(dev, "cfid", -1);
	bus_generic_attach(dev);

	return (0);
}

static void
cfi_add_sysctls(struct cfi_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

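	/*
	 * Note: "timout" (sic) is the historical spelling of these
	 * sysctl names; it is kept as-is here since the names are a
	 * user-visible interface.
	 */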
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
	    0, "Number of times the typical erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
	    "Number of times the maximum erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_write_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the typical write timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_write_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the maximum write timeout was exceeded");
	if (sc->sc_maxbuf > 0) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "typical_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the typical buffered write timeout was "
		    "exceeded");
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "max_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the maximum buffered write timeout was "
		    "exceeded");
	}
}

int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}

static bool
cfi_check_erase(struct cfi_softc *sc, u_int ofs, u_int sz)
{
	bool result;
	int i;
	uint32_t val;

	result = FALSE;
	for (i = 0; i < sz; i += sc->sc_width) {
		val = cfi_read(sc, ofs + i);
		switch (sc->sc_width) {
		case 1:
			if (val != 0xff)
				goto out;
			continue;
		case 2:
			if (val != 0xffff)
				goto out;
			continue;
		case 4:
			if (val != 0xffffffff)
				goto out;
			continue;
		}
	}
	result = TRUE;

out:
	return (result);
}

static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
    enum cfi_wait_cmd cmd)
{
	int done, error, tto_exceeded;
	uint32_t st0 = 0, st = 0;
	sbintime_t now;

	done = 0;
	error = 0;
	tto_exceeded = 0;
	while (!done && !error) {
		/*
		 * Save time before we start so we always do one check
		 * after the timeout has expired.
		 */
		now = sbinuptime();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
				    CFI_INTEL_STATUS_WSMS |
				    CFI_INTEL_STATUS_RSVD);
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
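		/*
		 * AMD-style parts have no status register; DQ6 (bit
		 * 0x40) toggles on successive reads while an operation
		 * is in progress, so two reads returning the same bit
		 * value mean the operation has finished.
		 */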
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}

		if (tto_exceeded ||
		    now > start + sc->sc_typical_timeouts[cmd]) {
			if (!tto_exceeded) {
				tto_exceeded = 1;
				sc->sc_tto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "typical timeout exceeded (cmd %d)", cmd);
#endif
			}
			if (now > start + sc->sc_max_timeouts[cmd]) {
				sc->sc_mto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "max timeout exceeded (cmd %d)", cmd);
#endif
				/*
				 * Leave the loop; without this break it
				 * would spin forever and the ETIMEDOUT
				 * below would be unreachable.
				 */
				break;
			}
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}

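/*
 * Write back the block staged by the caller (the cfi character-device
 * write path): sc_wrbuf holds the new contents for the erase block at
 * offset sc_wrofs (sc_wrbufsz bytes) and sc_wrbufcpy the data currently
 * in flash.  A preceding erase is needed only if some bit must go from
 * 0 to 1, since programming can only clear bits.
 */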
int
cfi_write_block(struct cfi_softc *sc)
{
	union {
		uint8_t *x8;
		uint16_t *x16;
		uint32_t *x32;
	} ptr, cpyprt;
	register_t intr;
	int error, i, j, neederase = 0;
	uint32_t st;
	u_int wlen;
	sbintime_t start;
	u_int minsz;
	uint32_t val;

	/* Intel flash must be unlocked before modification */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
		break;
	}

	/* Check if an erase is required. */
	for (i = 0; i < sc->sc_wrbufsz; i++)
		if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
			neederase = 1;
			break;
		}

	if (neederase) {
		intr = intr_disable();
		start = sbinuptime();
		/* Erase the block. */
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
			cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/* find minimum sector size */
			minsz = sc->sc_region[0].r_blksz;
			for (i = 1; i < sc->sc_regions; i++) {
				if (sc->sc_region[i].r_blksz < minsz)
					minsz = sc->sc_region[i].r_blksz;
			}
			cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
			    CFI_AMD_ERASE_SECTOR);
			cfi_amd_write(sc, sc->sc_wrofs,
			    sc->sc_wrofs >> (ffs(minsz) - 1),
			    CFI_AMD_BLOCK_ERASE);
			for (i = 0; i < CFI_AMD_MAXCHK; ++i) {
				if (cfi_check_erase(sc, sc->sc_wrofs,
				    sc->sc_wrbufsz))
					break;
				DELAY(10);
			}
			if (i == CFI_AMD_MAXCHK) {
				printf("\nCFI Sector Erase time out error\n");
				/* Don't return with interrupts disabled. */
				intr_restore(intr);
				return (ENODEV);
			}
			break;
		default:
			/* Better safe than sorry... */
			intr_restore(intr);
			return (ENODEV);
		}
		intr_restore(intr);
		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
		    CFI_TIMEOUT_ERASE);
		if (error)
			goto out;
	} else
		error = 0;

	/* Write the block using a multibyte write if supported. */
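	/*
	 * Intel buffered programming, as implemented below: issue
	 * CFI_BCS_BUF_PROG_SETUP and poll status until the write buffer
	 * is available, write the word count minus one, copy the data
	 * into the buffer, then issue the confirm; the final status
	 * poll is handled by cfi_wait_ready().
	 */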
	ptr.x8 = sc->sc_wrbuf;
	cpyprt.x8 = sc->sc_wrbufcpy;
	if (sc->sc_maxbuf > sc->sc_width) {
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
				wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);

				intr = intr_disable();

				start = sbinuptime();
				do {
					cfi_write(sc, sc->sc_wrofs + i,
					    CFI_BCS_BUF_PROG_SETUP);
					if (sbinuptime() > start +
					    sc->sc_max_timeouts[
					    CFI_TIMEOUT_BUFWRITE]) {
						error = ETIMEDOUT;
						/* Re-enable interrupts. */
						intr_restore(intr);
						goto out;
					}
					st = cfi_read(sc, sc->sc_wrofs + i);
				} while (!(st & CFI_INTEL_STATUS_WSMS));

				cfi_write(sc, sc->sc_wrofs + i,
				    (wlen / sc->sc_width) - 1);
				switch (sc->sc_width) {
				case 1:
					bus_space_write_region_1(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x8 + i, wlen);
					break;
				case 2:
					bus_space_write_region_2(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x16 + i / 2, wlen / 2);
					break;
				case 4:
					bus_space_write_region_4(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x32 + i / 4, wlen / 4);
					break;
				}

				cfi_write(sc, sc->sc_wrofs + i,
				    CFI_BCS_CONFIRM);

				intr_restore(intr);

				error = cfi_wait_ready(sc, sc->sc_wrofs + i,
				    start, CFI_TIMEOUT_BUFWRITE);
				if (error != 0)
					goto out;
			}
			goto out;
		default:
			/* Fall through to single word case */
			break;
		}
	}

	/* Write the block one byte/word at a time. */
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
		/* Avoid writing unless we are actually changing bits */
		if (!neederase) {
			switch (sc->sc_width) {
			case 1:
				if (*(ptr.x8 + i) == *(cpyprt.x8 + i))
					continue;
				break;
			case 2:
				if (*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
					continue;
				break;
			case 4:
				if (*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
					continue;
				break;
			}
		}

		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		start = sbinuptime();
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8 + i));
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16 + i / 2));
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32 + i / 4));
			break;
		}

		intr_restore(intr);

		if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
		    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
			for (j = 0; j < CFI_AMD_MAXCHK; ++j) {
				switch (sc->sc_width) {
				case 1:
					val = *(ptr.x8 + i);
					break;
				case 2:
					val = *(ptr.x16 + i / 2);
					break;
				case 4:
					val = *(ptr.x32 + i / 4);
					break;
				}

				if (cfi_read(sc, sc->sc_wrofs + i) == val)
					break;

				DELAY(10);
			}
			if (j == CFI_AMD_MAXCHK) {
				printf("\nCFI Program Verify time out error\n");
				error = ENXIO;
				goto out;
			}
		} else {
			error = cfi_wait_ready(sc, sc->sc_wrofs, start,
			    CFI_TIMEOUT_WRITE);
			if (error)
				goto out;
		}
	}

	/* error is 0. */

out:
	cfi_reset_default(sc);

	/* Relock Intel flash */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
		break;
	}
	return (error);
}

#ifdef CFI_SUPPORT_STRATAFLASH
/*
 * Intel StrataFlash Protection Register Support.
 *
 * The memory includes a 128-bit Protection Register that can be
 * used for security.  There are two 64-bit segments; one is programmed
 * at the factory with a unique 64-bit number which is immutable.
 * The other segment is left blank for User (OEM) programming.
 * The User/OEM segment is One Time Programmable (OTP).  It can also
 * be locked to prevent any further writes by setting bit 0 of the
 * Protection Lock Register (PLR).  The PLR can be written only once.
 */

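/*
 * Protection Register helpers: the PR is an array of 16-bit words, so
 * "off" is a word index and is shifted left by one to form the byte
 * offset.  Both helpers assume sc_width == 2, which the callers assert.
 */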
static uint16_t
cfi_get16(struct cfi_softc *sc, int off)
{
	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off << 1);
	return (v);
}

#ifdef CFI_ARMEDANDDANGEROUS
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off << 1, v);
}
#endif

/*
 * Read the factory-defined 64-bit segment of the PR.
 */
int
cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0))) << 48 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1))) << 32 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2))) << 16 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Read the User/OEM 64-bit segment of the PR.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4))) << 48 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5))) << 32 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6))) << 16 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
	sbintime_t start;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		start = sbinuptime();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id & 0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
		    CFI_TIMEOUT_WRITE);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return (ENXIO);
#endif
}

/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
	sbintime_t start;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	intr = intr_disable();
	/* start is an sbintime_t, so use sbinuptime() here. */
	start = sbinuptime();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
	    CFI_TIMEOUT_WRITE);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return (ENXIO);
#endif
}
#endif /* CFI_SUPPORT_STRATAFLASH */