1 /*-
2 * Copyright (c) 2006-2008 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 /*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 /*
63 * Intel XScale Network Processing Engine (NPE) support.
64 *
65 * Each NPE has an ixpnpeX device associated with it that is
66 * attached at boot. Depending on the microcode loaded into
67 * an NPE there may be an Ethernet interface (npeX) or some
68 * other network interface (e.g. for ATM). This file has support
69 * for loading microcode images and the associated NPE CPU
70 * manipulations (start, stop, reset).
71 *
72 * The code here basically replaces the npeDl and npeMh classes
73 * in the Intel Access Library (IAL).
74 *
75 * NB: Microcode images are loaded with firmware(9). To
76 * include microcode in a static kernel include the
77 * ixpnpe_fw device. Otherwise the firmware will be
78 * automatically loaded from the filesystem.
79 */
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/module.h>
85 #include <sys/time.h>
86 #include <sys/bus.h>
87 #include <sys/resource.h>
88 #include <sys/rman.h>
89 #include <sys/sysctl.h>
90
91 #include <sys/linker.h>
92 #include <sys/firmware.h>
93
94 #include <machine/bus.h>
95 #include <machine/resource.h>
96 #include <machine/intr.h>
97 #include <arm/xscale/ixp425/ixp425reg.h>
98 #include <arm/xscale/ixp425/ixp425var.h>
99
100 #include <arm/xscale/ixp425/ixp425_npereg.h>
101 #include <arm/xscale/ixp425/ixp425_npevar.h>
102
/*
 * Per-NPE soft state.  One instance exists per NPE core; instances are
 * reference counted and cached in npes[] so multiple clients (e.g. the
 * npe ethernet driver and a crypto driver) can share a core.
 */
struct ixpnpe_softc {
    device_t sc_dev;
    bus_space_tag_t sc_iot;
    bus_space_handle_t sc_ioh;
    bus_size_t sc_size;     /* size of mapped register window */
    struct resource *sc_irq;    /* IRQ resource */
    void *sc_ih;        /* interrupt handler */
    struct mtx sc_mtx;      /* mailbox lock */
    uint32_t sc_msg[2];     /* reply msg collected in ixpnpe_intr */
    int sc_msgwaiting;      /* sc_msg holds valid data */
    int sc_npeid;           /* NPE_A/NPE_B/NPE_C identifier */
    int sc_nrefs;       /* # of references */

    int validImage;     /* valid ucode image loaded */
    int started;        /* NPE is started */
    uint8_t functionalityId;/* ucode functionality ID */
    int insMemSize;     /* size of instruction memory */
    int dataMemSize;    /* size of data memory */
    uint32_t savedExecCount;        /* saved by npe_cpu_step_save() */
    uint32_t savedEcsDbgCtxtReg2;   /* saved by npe_cpu_step_save() */
};
/* Cache of attached NPE instances, indexed by NPE id (see ixpnpe_attach). */
static struct ixpnpe_softc *npes[NPE_MAX];
125
126 #define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff
127
128 /* used to read download map from version in microcode image */
129 #define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000
130 #define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001
131 #define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002
132 #define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F
133
134 /*
135 * masks used to extract address info from State information context
136 * register addresses as read from microcode image
137 */
138 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F
139 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0
140
141 /* LSB offset of Context Number field in State-Info Context Address */
142 #define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4
143
144 /* size (in words) of single State Information entry (ctxt reg address|data) */
145 #define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2
146
/*
 * On-disk/in-image layout of one download-map entry: a block type
 * (instruction/data/state) and the word offset of the block within
 * the image.  All fields are 32-bit words in the microcode image.
 */
typedef struct {
    uint32_t type;
    uint32_t offset;
} IxNpeDlNpeMgrDownloadMapBlockEntry;

/*
 * A map entry is either a block descriptor or, when type holds
 * IX_NPEDL_END_OF_DOWNLOAD_MAP, an end-of-map marker.
 */
typedef union {
    IxNpeDlNpeMgrDownloadMapBlockEntry block;
    uint32_t eodmMarker;
} IxNpeDlNpeMgrDownloadMapEntry;

typedef struct {
    /* 1st entry in the download map (there may be more than one) */
    IxNpeDlNpeMgrDownloadMapEntry entry[1];
} IxNpeDlNpeMgrDownloadMap;

/* used to access an instruction or data block in a microcode image */
typedef struct {
    uint32_t npeMemAddress;     /* NPE-relative load address (words) */
    uint32_t size;              /* number of data words that follow */
    uint32_t data[1];           /* NB: variable length, [1] is legacy idiom */
} IxNpeDlNpeMgrCodeBlock;

/* used to access each Context Reg entry state-information block */
typedef struct {
    uint32_t addressInfo;       /* packed ctx reg # + ctx number */
    uint32_t value;             /* value to write to that register */
} IxNpeDlNpeMgrStateInfoCtxtRegEntry;

/* used to access a state-information block in a microcode image */
typedef struct {
    uint32_t size;              /* total words of entries that follow */
    IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1];
} IxNpeDlNpeMgrStateInfoBlock;
180
181 static int npe_debug = 0;
182 SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug,
183 0, "IXP4XX NPE debug msgs");
184 #define DPRINTF(dev, fmt, ...) do { \
185 if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
186 } while (0)
187 #define DPRINTFn(n, dev, fmt, ...) do { \
188 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \
189 } while (0)
190
191 static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t);
192 static int npe_isstopped(struct ixpnpe_softc *);
193 static int npe_load_ins(struct ixpnpe_softc *,
194 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
195 static int npe_load_data(struct ixpnpe_softc *,
196 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
197 static int npe_load_stateinfo(struct ixpnpe_softc *,
198 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify);
199 static int npe_load_image(struct ixpnpe_softc *,
200 const uint32_t *imageCodePtr, int verify);
201 static int npe_cpu_reset(struct ixpnpe_softc *);
202 static int npe_cpu_start(struct ixpnpe_softc *);
203 static int npe_cpu_stop(struct ixpnpe_softc *);
204 static void npe_cmd_issue_write(struct ixpnpe_softc *,
205 uint32_t cmd, uint32_t addr, uint32_t data);
206 static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *,
207 uint32_t cmd, uint32_t addr);
208 static int npe_ins_write(struct ixpnpe_softc *,
209 uint32_t addr, uint32_t data, int verify);
210 static int npe_data_write(struct ixpnpe_softc *,
211 uint32_t addr, uint32_t data, int verify);
212 static void npe_ecs_reg_write(struct ixpnpe_softc *,
213 uint32_t reg, uint32_t data);
214 static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg);
215 static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command);
216 static void npe_cpu_step_save(struct ixpnpe_softc *);
217 static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction,
218 uint32_t ctxtNum, uint32_t ldur);
219 static void npe_cpu_step_restore(struct ixpnpe_softc *);
220 static int npe_logical_reg_read(struct ixpnpe_softc *,
221 uint32_t regAddr, uint32_t regSize,
222 uint32_t ctxtNum, uint32_t *regVal);
223 static int npe_logical_reg_write(struct ixpnpe_softc *,
224 uint32_t regAddr, uint32_t regVal,
225 uint32_t regSize, uint32_t ctxtNum, int verify);
226 static int npe_physical_reg_write(struct ixpnpe_softc *,
227 uint32_t regAddr, uint32_t regValue, int verify);
228 static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum,
229 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify);
230
231 static void ixpnpe_intr(void *arg);
232
233 static uint32_t
234 npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off)
235 {
236 uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
237 DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v);
238 return v;
239 }
240
241 static void
242 npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val)
243 {
244 DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val);
245 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
246 }
247
/*
 * Attach to (or add a reference to) the NPE identified by npeid.
 *
 * Returns a shared, reference-counted softc; NULL only for a bad npeid.
 * Map failures and IRQ allocation failures panic, matching early-boot
 * attach conventions in this driver.
 */
struct ixpnpe_softc *
ixpnpe_attach(device_t dev, int npeid)
{
    /* Per-NPE static configuration: register window, IRQ, memory sizes. */
    struct npeconfig {
        uint32_t base;          /* physical base of register window */
        uint32_t size;          /* window size in bytes */
        int irq;                /* mailbox interrupt */
        uint32_t ins_memsize;   /* instruction memory size (words) */
        uint32_t data_memsize;  /* data memory size (words) */
    };
    static const struct npeconfig npeconfigs[NPE_MAX] = {
        [NPE_A] = {
            .base = IXP425_NPE_A_HWBASE,
            .size = IXP425_NPE_A_SIZE,
            .irq = IXP425_INT_NPE_A,
            .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEA,
            .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA
        },
        [NPE_B] = {
            .base = IXP425_NPE_B_HWBASE,
            .size = IXP425_NPE_B_SIZE,
            .irq = IXP425_INT_NPE_B,
            .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB,
            .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB
        },
        [NPE_C] = {
            .base = IXP425_NPE_C_HWBASE,
            .size = IXP425_NPE_C_SIZE,
            .irq = IXP425_INT_NPE_C,
            .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC,
            .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC
        },
    };
    struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
    struct ixpnpe_softc *sc;
    const struct npeconfig *config;
    int rid;

    if (npeid >= NPE_MAX) {
        device_printf(dev, "%s: bad npeid %d\n", __func__, npeid);
        return NULL;
    }
    /* If this NPE was already attached, just bump the refcount. */
    sc = npes[npeid];
    if (sc != NULL) {
        sc->sc_nrefs++;
        return sc;
    }
    config = &npeconfigs[npeid];

    /* XXX M_BUS */
    sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO);
    sc->sc_dev = dev;
    sc->sc_iot = sa->sc_iot;    /* share the parent's bus space tag */
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF);
    sc->sc_npeid = npeid;
    sc->sc_nrefs = 1;

    sc->sc_size = config->size;
    if (cpu_is_ixp42x()) {
        /* NB: instruction/data memory sizes are NPE-dependent */
        sc->insMemSize = config->ins_memsize;
        sc->dataMemSize = config->data_memsize;
    } else {
        /* IXP43x/46x parts have uniform, larger NPE memories */
        sc->insMemSize = IXP46X_NPEDL_INS_MEMSIZE_WORDS;
        sc->dataMemSize = IXP46X_NPEDL_DATA_MEMSIZE_WORDS;
    }

    if (bus_space_map(sc->sc_iot, config->base, sc->sc_size, 0, &sc->sc_ioh))
        panic("%s: Cannot map registers", device_get_name(dev));

    /*
     * Setup IRQ and handler for NPE message support.
     */
    rid = 0;
    sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
        config->irq, config->irq, 1, RF_ACTIVE);
    if (sc->sc_irq == NULL)
        panic("%s: Unable to allocate irq %u", device_get_name(dev),
            config->irq);
    /* XXX could be a source of entropy */
    bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, ixpnpe_intr, sc, &sc->sc_ih);
    /*
     * Enable output fifo interrupts (NB: must also set OFIFO Write Enable)
     */
    npe_reg_write(sc, IX_NPECTL,
        npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE));

    /* Publish for sharing by later ixpnpe_attach calls. */
    npes[npeid] = sc;

    return sc;
}
340
341 void
342 ixpnpe_detach(struct ixpnpe_softc *sc)
343 {
344 if (--sc->sc_nrefs == 0) {
345 npes[sc->sc_npeid] = NULL;
346
347 /* disable output fifo interrupts */
348 npe_reg_write(sc, IX_NPECTL,
349 npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE));
350
351 bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih);
352 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
353 mtx_destroy(&sc->sc_mtx);
354 free(sc, M_TEMP);
355 }
356 }
357
358 int
359 ixpnpe_stopandreset(struct ixpnpe_softc *sc)
360 {
361 int error;
362
363 mtx_lock(&sc->sc_mtx);
364 error = npe_cpu_stop(sc); /* stop NPE */
365 if (error == 0)
366 error = npe_cpu_reset(sc); /* reset it */
367 if (error == 0)
368 sc->started = 0; /* mark stopped */
369 mtx_unlock(&sc->sc_mtx);
370
371 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
372 return error;
373 }
374
375 static int
376 ixpnpe_start_locked(struct ixpnpe_softc *sc)
377 {
378 int error;
379
380 if (!sc->started) {
381 error = npe_cpu_start(sc);
382 if (error == 0)
383 sc->started = 1;
384 } else
385 error = 0;
386
387 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
388 return error;
389 }
390
391 int
392 ixpnpe_start(struct ixpnpe_softc *sc)
393 {
394 int ret;
395
396 mtx_lock(&sc->sc_mtx);
397 ret = ixpnpe_start_locked(sc);
398 mtx_unlock(&sc->sc_mtx);
399 return (ret);
400 }
401
402 int
403 ixpnpe_stop(struct ixpnpe_softc *sc)
404 {
405 int error;
406
407 mtx_lock(&sc->sc_mtx);
408 error = npe_cpu_stop(sc);
409 if (error == 0)
410 sc->started = 0;
411 mtx_unlock(&sc->sc_mtx);
412
413 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
414 return error;
415 }
416
/*
 * Indicates the start of an NPE Image, in new NPE Image Library format.
 * 2 consecutive occurrences indicates the end of the NPE Image Library
 */
#define NPE_IMAGE_MARKER 0xfeedf00d

/*
 * NPE Image Header definition, used in new NPE Image Library format.
 * Each image in the library is preceded by one of these; `size' is in
 * 32-bit words and covers the payload following the header.
 */
typedef struct {
    uint32_t marker;    /* always NPE_IMAGE_MARKER */
    uint32_t id;        /* image id (device/npe/func/version) */
    uint32_t size;      /* payload size in words */
} IxNpeDlImageMgrImageHeader;
431
432 static int
433 npe_findimage(struct ixpnpe_softc *sc,
434 const uint32_t *imageLibrary, uint32_t imageId,
435 const uint32_t **imagePtr, uint32_t *imageSize)
436 {
437 const IxNpeDlImageMgrImageHeader *image;
438 uint32_t offset = 0;
439
440 while (imageLibrary[offset] == NPE_IMAGE_MARKER) {
441 image = (const IxNpeDlImageMgrImageHeader *)
442 &imageLibrary[offset];
443 offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t);
444
445 DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n",
446 __func__, offset, image->marker, image->id, image->size);
447 if (image->id == imageId) {
448 *imagePtr = imageLibrary + offset;
449 *imageSize = image->size;
450 return 0;
451 }
452 /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */
453 if (image->id == NPE_IMAGE_MARKER) {
454 DPRINTF(sc->sc_dev, "imageId 0x%08x not found in "
455 "image library header\n", imageId);
456 /* reached end of library, image not found */
457 return ESRCH;
458 }
459 offset += image->size;
460 }
461 return ESRCH;
462 }
463
/*
 * Fetch the firmware file imageName via firmware(9), locate the image
 * with the given imageId in it, stop+reset the NPE, download the image
 * and restart the NPE.  Returns 0 on success or an errno (ENOENT when
 * the firmware file is missing, ESRCH when the id is not in the file).
 */
static int
ixpnpe_load_firmware(struct ixpnpe_softc *sc, const char *imageName,
    uint32_t imageId)
{
    static const char *devname[4] =
        { "IXP425", "IXP435/IXP465", "DeviceID#2", "DeviceID#3" };
    uint32_t imageSize;
    const uint32_t *imageCodePtr;
    const struct firmware *fw;
    int error;

    DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId);

#if 0
    IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId);
    /*
     * Checking if image being loaded is meant for device that is running.
     * Image is forward compatible. i.e Image built for IXP42X should run
     * on IXP46X but not vice versa.
     */
    if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK))
        return EINVAL;
#endif
    error = ixpnpe_stopandreset(sc);    /* stop and reset the NPE */
    if (error != 0)
        return error;

    fw = firmware_get(imageName);
    if (fw == NULL)
        return ENOENT;

    /* Locate desired image in files w/ combined images */
    error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize);
    if (error != 0)
        goto done;

    device_printf(sc->sc_dev,
        "load fw image %s.NPE-%c Func 0x%x Rev %u.%u\n",
        devname[NPEIMAGE_DEVID(imageId)], 'A' + NPEIMAGE_NPEID(imageId),
        NPEIMAGE_FUNCID(imageId), NPEIMAGE_MAJOR(imageId),
        NPEIMAGE_MINOR(imageId));

    /*
     * If download was successful, store image Id in list of
     * currently loaded images. If a critical error occurred
     * during download, record that the NPE has an invalid image
     */
    mtx_lock(&sc->sc_mtx);
    error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/);
    if (error == 0) {
        sc->validImage = 1;
        error = ixpnpe_start_locked(sc);
    } else {
        sc->validImage = 0;
    }
    /* NB: recorded even on failure, mirroring the IAL behaviour */
    sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId);
    mtx_unlock(&sc->sc_mtx);
done:
    firmware_put(fw, FIRMWARE_UNLOAD);
    DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
    return error;
}
526
527 static int
528 override_imageid(device_t dev, const char *resname, uint32_t *val)
529 {
530 int unit = device_get_unit(dev);
531 int resval;
532
533 if (resource_int_value("npe", unit, resname, &resval) != 0)
534 return 0;
535 /* XXX validate */
536 if (bootverbose)
537 device_printf(dev, "using npe.%d.%s=0x%x override\n",
538 unit, resname, resval);
539 *val = resval;
540 return 1;
541 }
542
/*
 * One-time NPE bring-up: load firmware (probing successive minor
 * versions until one is found), start the NPE, and wait for the
 * firmware's initial status message.  Returns 0 if the NPE ends up
 * running, an errno otherwise.  A no-op if already started.
 */
int
ixpnpe_init(struct ixpnpe_softc *sc)
{
    static const uint32_t npeconfig[NPE_MAX] = {
        [NPE_A] = IXP425_NPE_A_IMAGEID,
        [NPE_B] = IXP425_NPE_B_IMAGEID,
        [NPE_C] = IXP425_NPE_C_IMAGEID,
    };
    uint32_t imageid, msg[2];
    int error;

    if (sc->started)
        return 0;
    /*
     * Load NPE firmware and start it running. We assume
     * that minor version bumps remain compatible so probe
     * the firmware image starting with the expected version
     * and then bump the minor version up to the max.
     */
    if (!override_imageid(sc->sc_dev, "imageid", &imageid))
        imageid = npeconfig[sc->sc_npeid];
    for (;;) {
        error = ixpnpe_load_firmware(sc, "npe_fw", imageid);
        if (error == 0)
            break;
        /*
         * ESRCH is returned when the requested image
         * is not present
         */
        if (error != ESRCH) {
            device_printf(sc->sc_dev,
                "cannot init NPE (error %d)\n", error);
            return error;
        }
        /* bump the minor version up to the max possible */
        if (NPEIMAGE_MINOR(imageid) == 0xff) {
            device_printf(sc->sc_dev, "cannot locate firmware "
                "(imageid 0x%08x)\n", imageid);
            return error;
        }
        /* NB: minor version occupies the low byte of the image id */
        imageid++;
    }
    /* NB: firmware should respond with a status msg */
    if (ixpnpe_recvmsg_sync(sc, msg) != 0) {
        device_printf(sc->sc_dev,
            "firmware did not respond as expected\n");
        return EIO;
    }
    return 0;
}
593
594 int
595 ixpnpe_getfunctionality(struct ixpnpe_softc *sc)
596 {
597 return (sc->validImage ? sc->functionalityId : 0);
598 }
599
600 static int
601 npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet)
602 {
603 uint32_t val;
604
605 val = npe_reg_read(sc, reg);
606 DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n",
607 __func__, reg, expectedBitsSet, val,
608 (val & expectedBitsSet) == expectedBitsSet);
609 return ((val & expectedBitsSet) == expectedBitsSet);
610 }
611
612 static int
613 npe_isstopped(struct ixpnpe_softc *sc)
614 {
615 return npe_checkbits(sc,
616 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP);
617 }
618
619 static int
620 npe_load_ins(struct ixpnpe_softc *sc,
621 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
622 {
623 uint32_t npeMemAddress;
624 int i, blockSize;
625
626 npeMemAddress = bp->npeMemAddress;
627 blockSize = bp->size; /* NB: instruction/data count */
628 if (npeMemAddress + blockSize > sc->insMemSize) {
629 device_printf(sc->sc_dev,
630 "Block size %u too big for NPE memory\n", blockSize);
631 return EINVAL; /* XXX */
632 }
633 for (i = 0; i < blockSize; i++, npeMemAddress++) {
634 if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
635 device_printf(sc->sc_dev,
636 "NPE instruction write failed");
637 return EIO;
638 }
639 }
640 return 0;
641 }
642
643 static int
644 npe_load_data(struct ixpnpe_softc *sc,
645 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
646 {
647 uint32_t npeMemAddress;
648 int i, blockSize;
649
650 npeMemAddress = bp->npeMemAddress;
651 blockSize = bp->size; /* NB: instruction/data count */
652 if (npeMemAddress + blockSize > sc->dataMemSize) {
653 device_printf(sc->sc_dev,
654 "Block size %u too big for NPE memory\n", blockSize);
655 return EINVAL;
656 }
657 for (i = 0; i < blockSize; i++, npeMemAddress++) {
658 if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
659 device_printf(sc->sc_dev, "NPE data write failed\n");
660 return EIO;
661 }
662 }
663 return 0;
664 }
665
666 static int
667 npe_load_stateinfo(struct ixpnpe_softc *sc,
668 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify)
669 {
670 int i, nentries, error;
671
672 npe_cpu_step_save(sc);
673
674 /* for each state-info context register entry in block */
675 nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE;
676 error = 0;
677 for (i = 0; i < nentries; i++) {
678 /* each state-info entry is 2 words (address, value) */
679 uint32_t regVal = bp->ctxtRegEntry[i].value;
680 uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo;
681
682 uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG);
683 uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >>
684 IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM;
685
686 /* error-check Context Register No. and Context Number values */
687 if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) {
688 device_printf(sc->sc_dev,
689 "invalid Context Register %u\n", reg);
690 error = EINVAL;
691 break;
692 }
693 if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) {
694 device_printf(sc->sc_dev,
695 "invalid Context Number %u\n", cNum);
696 error = EINVAL;
697 break;
698 }
699 /* NOTE that there is no STEVT register for Context 0 */
700 if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) {
701 device_printf(sc->sc_dev,
702 "no STEVT for Context 0\n");
703 error = EINVAL;
704 break;
705 }
706
707 if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) {
708 device_printf(sc->sc_dev,
709 "write of state-info to NPE failed\n");
710 error = EIO;
711 break;
712 }
713 }
714
715 npe_cpu_step_restore(sc);
716 return error;
717 }
718
/*
 * Walk a microcode image's download map and download each block
 * (instruction, data, or state-info) to the NPE.  The NPE must be
 * stopped first.  Returns 0 or the first error encountered.
 */
static int
npe_load_image(struct ixpnpe_softc *sc,
    const uint32_t *imageCodePtr, int verify)
{
#define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP)
    const IxNpeDlNpeMgrDownloadMap *downloadMap;
    int i, error;

    if (!npe_isstopped(sc)) {       /* verify NPE is stopped */
        device_printf(sc->sc_dev,
            "cannot load image, NPE not stopped\n");
        return EIO;
    }

    /*
     * Read Download Map, checking each block type and calling
     * appropriate function to perform download
     */
    error = 0;
    downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr;
    for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) {
        /* calculate pointer to block to be downloaded */
        const uint32_t *bp = imageCodePtr +
            downloadMap->entry[i].block.offset;
        switch (downloadMap->entry[i].block.type) {
        case IX_NPEDL_BLOCK_TYPE_INSTRUCTION:
            error = npe_load_ins(sc,
                (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: inst, error %d\n",
                __func__, error);
            break;
        case IX_NPEDL_BLOCK_TYPE_DATA:
            error = npe_load_data(sc,
                (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: data, error %d\n",
                __func__, error);
            break;
        case IX_NPEDL_BLOCK_TYPE_STATE:
            error = npe_load_stateinfo(sc,
                (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: state, error %d\n",
                __func__, error);
            break;
        default:
            device_printf(sc->sc_dev,
                "unknown block type 0x%x in download map\n",
                downloadMap->entry[i].block.type);
            error = EIO;        /* XXX */
            break;
        }
        /* abort the download on the first failure */
        if (error != 0)
            break;
    }
    return error;
#undef EOM
}
775
/*
 * Reset values for the Execution Context Stack (ECS) registers,
 * written during npe_cpu_reset().  (NB: despite the original comment
 * these are ECS registers, not Context Store registers.)
 */
static const struct {
    uint32_t regAddr;
    uint32_t regResetVal;
} ixNpeDlEcsRegResetValues[] = {
    { IX_NPEDL_ECS_BG_CTXT_REG_0,    IX_NPEDL_ECS_BG_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_BG_CTXT_REG_1,    IX_NPEDL_ECS_BG_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_BG_CTXT_REG_2,    IX_NPEDL_ECS_BG_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_0,   IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_1,   IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_2,   IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_INSTRUCT_REG,     IX_NPEDL_ECS_INSTRUCT_REG_RESET }
};

/*
 * Reset values for the per-context Context Store registers
 * (STEVT, STARTPC, REGMAP, CINDEX), indexed by context register number.
 */
static const uint32_t ixNpeDlCtxtRegResetValues[] = {
    IX_NPEDL_CTXT_REG_RESET_STEVT,
    IX_NPEDL_CTXT_REG_RESET_STARTPC,
    IX_NPEDL_CTXT_REG_RESET_REGMAP,
    IX_NPEDL_CTXT_REG_RESET_CINDEX,
};

/* masks applied to the NPE configuration control register during reset */
#define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF
#define IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF
806
807 #if 0
808 /*
809 * Reset the NPE and its coprocessor using the
810 * fuse bits in the feature control register.
811 */
812 static void
813 npe_reset(int npeid)
814 {
815 uint32_t mask = EXP_FCTRL_NPEA << npeid;
816 uint32_t v;
817
818 v = ixp4xx_read_feature_bits();
819 ixp4xx_write_feature_bits(v &~ mask);
820 /* un-fuse and un-reset the NPE & coprocessor */
821 ixp4xx_write_feature_bits(v | mask);
822 }
823 #endif
824
/*
 * Perform a full software reset of the NPE: disable parity interrupts,
 * drain the FIFOs, clear the mailbox, zero the physical register file,
 * reset the context store and ECS registers, clear assorted status
 * registers, and finally re-stop the NPE and restore the configuration
 * control register.  The sequence follows the Intel IAL npeDl reset
 * procedure; statement order is significant throughout.
 */
static int
npe_cpu_reset(struct ixpnpe_softc *sc)
{
#define N(a) (sizeof(a) / sizeof(a[0]))
    uint32_t ctxtReg; /* identifies Context Store reg (0-3) */
    uint32_t regAddr;
    uint32_t regVal;
    uint32_t ixNpeConfigCtrlRegVal;
    int i, error = 0;

    /* pre-store the NPE Config Control Register Value */
    ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL);
    ixNpeConfigCtrlRegVal |= 0x3F000000;

    /* disable the parity interrupt */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
        (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK));
    DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n",
        __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK);

    /* enter debug-step mode so the FIFO-drain instructions below work */
    npe_cpu_step_save(sc);

    /*
     * Clear the FIFOs.
     */
    while (npe_checkbits(sc,
        IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) {
        /* read from the Watch-point FIFO until empty */
        (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO);
    }

    while (npe_checkbits(sc,
        IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) {
        /* read from the outFIFO until empty */
        (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO);
    }

    while (npe_checkbits(sc,
        IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) {
        /*
         * Step execution of the NPE instruction to read inFIFO using
         * the Debug Executing Context stack.
         */
        error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0);
        if (error != 0) {
            DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n",
                __func__, error);
            npe_cpu_step_restore(sc);
            return error;
        }
    }

    /*
     * Reset the mailbox reg
     */
    /* ...from XScale side */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST);
    /* ...from NPE side */
    error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0);
    if (error != 0) {
        DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n",
            __func__, error);
        npe_cpu_step_restore(sc);
        return error;
    }

    /*
     * Reset the physical registers in the NPE register file:
     * Note: no need to save/restore REGMAP for Context 0 here
     * since all Context Store regs are reset in subsequent code.
     */
    for (regAddr = 0;
        regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0;
        regAddr++) {
        /* for each physical register in the NPE reg file, write 0 : */
        error = npe_physical_reg_write(sc, regAddr, 0, TRUE);
        if (error != 0) {
            DPRINTF(sc->sc_dev, "%s: cannot write phy reg,"
                "error %u\n", __func__, error);
            npe_cpu_step_restore(sc);
            return error;           /* abort reset */
        }
    }

    /*
     * Reset the context store:
     */
    for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) {
        /* set each context's Context Store registers to reset values */
        for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) {
            /* NOTE that there is no STEVT register for Context 0 */
            if (i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)
                continue;
            regVal = ixNpeDlCtxtRegResetValues[ctxtReg];
            error = npe_ctx_reg_write(sc, i, ctxtReg,
                regVal, TRUE);
            if (error != 0) {
                DPRINTF(sc->sc_dev, "%s: cannot write ctx reg,"
                    "error %u\n", __func__, error);
                npe_cpu_step_restore(sc);
                return error;       /* abort reset */
            }
        }
    }

    /* leave debug-step mode before touching the ECS registers */
    npe_cpu_step_restore(sc);

    /* write Reset values to Execution Context Stack registers */
    for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++)
        npe_ecs_reg_write(sc,
            ixNpeDlEcsRegResetValues[i].regAddr,
            ixNpeDlEcsRegResetValues[i].regResetVal);

    /* clear the profile counter */
    npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);

    /* clear registers EXCT, AP0, AP1, AP2 and AP3 */
    for (regAddr = IX_NPEDL_REG_OFFSET_EXCT;
        regAddr <= IX_NPEDL_REG_OFFSET_AP3;
        regAddr += sizeof(uint32_t))
        npe_reg_write(sc, regAddr, 0);

    /* Reset the Watch-count register */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0);
#if 0
    /*
     * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation
     * XXX Removed because it breaks IXP435 operation; e.g. on Gateworks
     * XXX 2358 boards reseting NPE-A after NPE-C is running causes both
     * XXX npe's to stop working
     */
    npe_reset(sc->sc_npeid);
#endif
    /*
     * Call NpeMgr function to stop the NPE again after the Feature Control
     * has unfused and Un-Reset the NPE and its associated Coprocessors.
     */
    error = npe_cpu_stop(sc);

    /* restore NPE configuration bus Control Register - Parity Settings */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
        (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK));
    DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n",
        __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL));

    return error;
#undef N
}
973
974 static int
975 npe_cpu_start(struct ixpnpe_softc *sc)
976 {
977 uint32_t ecsRegVal;
978
979 /*
980 * Ensure only Background Context Stack Level is Active by turning off
981 * the Active bit in each of the other Executing Context Stack levels.
982 */
983 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
984 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
985 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal);
986
987 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
988 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
989 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal);
990
991 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0);
992 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
993 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal);
994
995 /* clear the pipeline */
996 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
997
998 /* start NPE execution by issuing cmd through EXCTL register on NPE */
999 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START);
1000
1001 /*
1002 * Check execution status of NPE to verify operation was successful.
1003 */
1004 return npe_checkbits(sc,
1005 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 0 : EIO;
1006 }
1007
1008 static int
1009 npe_cpu_stop(struct ixpnpe_softc *sc)
1010 {
1011 /* stop NPE execution by issuing cmd through EXCTL register on NPE */
1012 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP);
1013
1014 /* verify that NPE Stop was successful */
1015 return npe_checkbits(sc,
1016 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO;
1017 }
1018
/* Logical register widths, in bits. */
#define	IX_NPEDL_REG_SIZE_BYTE		8
#define	IX_NPEDL_REG_SIZE_SHORT		16
#define	IX_NPEDL_REG_SIZE_WORD		32

/*
 * Introduce extra read cycles after issuing a read command to the NPE
 * so that we read the register after the NPE has updated it.
 * This is to overcome a race condition between the XScale and the NPE.
 */
#define	IX_NPEDL_DELAY_READ_CYCLES	2
/*
 * Mask off the top three MSBs of a 32-bit word downloaded into NPE IMEM
 * (only the low 29 bits are implemented).  NB: no trailing ';' -- the
 * previous definition carried one, which made the macro usable only at
 * statement end and would silently break any expression context.
 */
#define	IX_NPEDL_MASK_UNUSED_IMEM_BITS	0x1FFFFFFF
1033
1034 static void
1035 npe_cmd_issue_write(struct ixpnpe_softc *sc,
1036 uint32_t cmd, uint32_t addr, uint32_t data)
1037 {
1038 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data);
1039 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1040 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
1041 }
1042
1043 static uint32_t
1044 npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr)
1045 {
1046 uint32_t data;
1047 int i;
1048
1049 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1050 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
1051 for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++)
1052 data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1053 return data;
1054 }
1055
1056 static int
1057 npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1058 {
1059 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1060 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data);
1061 if (verify) {
1062 uint32_t rdata;
1063
1064 /*
1065 * Write invalid data to this reg, so we can see if we're
1066 * reading the EXDATA register too early.
1067 */
1068 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1069
1070 /*
1071 * Disabled since top 3 MSB are not used for Azusa
1072 * hardware Refer WR:IXA00053900
1073 */
1074 data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
1075
1076 rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM,
1077 addr);
1078 rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
1079
1080 if (data != rdata)
1081 return EIO;
1082 }
1083 return 0;
1084 }
1085
1086 static int
1087 npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1088 {
1089 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1090 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data);
1091 if (verify) {
1092 /*
1093 * Write invalid data to this reg, so we can see if we're
1094 * reading the EXDATA register too early.
1095 */
1096 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1097 if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr))
1098 return EIO;
1099 }
1100 return 0;
1101 }
1102
1103 static void
1104 npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data)
1105 {
1106 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data);
1107 }
1108
1109 static uint32_t
1110 npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg)
1111 {
1112 return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg);
1113 }
1114
1115 static void
1116 npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command)
1117 {
1118 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command);
1119 }
1120
1121 static void
1122 npe_cpu_step_save(struct ixpnpe_softc *sc)
1123 {
1124 /* turn off the halt bit by clearing Execution Count register. */
1125 /* save reg contents 1st and restore later */
1126 sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT);
1127 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0);
1128
1129 /* ensure that IF and IE are on (temporarily), so that we don't end up
1130 * stepping forever */
1131 sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc,
1132 IX_NPEDL_ECS_DBG_CTXT_REG_2);
1133
1134 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2,
1135 (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF |
1136 IX_NPEDL_MASK_ECS_DBG_REG_2_IE));
1137 }
1138
1139 static int
1140 npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction,
1141 uint32_t ctxtNum, uint32_t ldur)
1142 {
1143 #define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000
1144 uint32_t ecsDbgRegVal;
1145 uint32_t oldWatchcount, newWatchcount;
1146 int tries;
1147
1148 /* set the Active bit, and the LDUR, in the debug level */
1149 ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
1150 (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR);
1151
1152 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal);
1153
1154 /*
1155 * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the
1156 * instruction, and set SELCTXT at ECS DEBUG Level to specify which
1157 * context store to access.
1158 * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
1159 */
1160 ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) |
1161 (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT);
1162
1163 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal);
1164
1165 /* clear the pipeline */
1166 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1167
1168 /* load NPE instruction into the instruction register */
1169 npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction);
1170
1171 /* need this value later to wait for completion of NPE execution step */
1172 oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1173
1174 /* issue a Step One command via the Execution Control register */
1175 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP);
1176
1177 /*
1178 * Force the XScale to wait until the NPE has finished execution step
1179 * NOTE that this delay will be very small, just long enough to allow a
1180 * single NPE instruction to complete execution; if instruction
1181 * execution is not completed before timeout retries, exit the while
1182 * loop.
1183 */
1184 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1185 for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES &&
1186 newWatchcount == oldWatchcount; tries++) {
1187 /* Watch Count register incr's when NPE completes an inst */
1188 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1189 }
1190 return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO;
1191 #undef IX_NPE_DL_MAX_NUM_OF_RETRIES
1192 }
1193
1194 static void
1195 npe_cpu_step_restore(struct ixpnpe_softc *sc)
1196 {
1197 /* clear active bit in debug level */
1198 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0);
1199
1200 /* clear the pipeline */
1201 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1202
1203 /* restore Execution Count register contents. */
1204 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount);
1205
1206 /* restore IF and IE bits to original values */
1207 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2);
1208 }
1209
1210 static int
1211 npe_logical_reg_read(struct ixpnpe_softc *sc,
1212 uint32_t regAddr, uint32_t regSize,
1213 uint32_t ctxtNum, uint32_t *regVal)
1214 {
1215 uint32_t npeInstruction, mask;
1216 int error;
1217
1218 switch (regSize) {
1219 case IX_NPEDL_REG_SIZE_BYTE:
1220 npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE;
1221 mask = 0xff;
1222 break;
1223 case IX_NPEDL_REG_SIZE_SHORT:
1224 npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT;
1225 mask = 0xffff;
1226 break;
1227 case IX_NPEDL_REG_SIZE_WORD:
1228 npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD;
1229 mask = 0xffffffff;
1230 break;
1231 default:
1232 return EINVAL;
1233 }
1234
1235 /* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */
1236 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) |
1237 (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1238
1239 /* step execution of NPE inst using Debug Executing Context stack */
1240 error = npe_cpu_step(sc, npeInstruction, ctxtNum,
1241 IX_NPEDL_RD_INSTR_LDUR);
1242 if (error != 0) {
1243 DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n",
1244 __func__, regAddr, regSize, ctxtNum, error);
1245 return error;
1246 }
1247 /* read value of register from Execution Data register */
1248 *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1249
1250 /* align value from left to right */
1251 *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask;
1252
1253 return 0;
1254 }
1255
/*
 * Write a logical (context-store) register of width regSize (bits) at
 * regAddr in context ctxtNum by single-stepping a write instruction on
 * the NPE's debug Executing Context Stack.  A word-sized write is
 * decomposed into two recursive half-word writes.  If verify is set,
 * the register is read back and compared against the value written.
 * Returns 0 on success, EINVAL for an unsupported regSize, EIO on a
 * verify mismatch, or the error from the underlying step/read.
 */
static int
npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal,
    uint32_t regSize, uint32_t ctxtNum, int verify)
{
	int error;

	DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n",
	    __func__, regAddr, regVal, regSize, ctxtNum);
	if (regSize == IX_NPEDL_REG_SIZE_WORD) {
		/*
		 * NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3|
		 * Write upper half-word (short) to |d0|d1|
		 */
		error = npe_logical_reg_write(sc, regAddr,
		    regVal >> IX_NPEDL_REG_SIZE_SHORT,
		    IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
		if (error != 0)
			return error;

		/* Write lower half-word (short) to |d2|d3| */
		error = npe_logical_reg_write(sc,
		    regAddr + sizeof(uint16_t),
		    regVal & 0xffff,
		    IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
	} else {
		uint32_t npeInstruction;

		/* pick the immediate-write opcode and clamp the value */
		switch (regSize) {
		case IX_NPEDL_REG_SIZE_BYTE:
			npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE;
			regVal &= 0xff;
			break;
		case IX_NPEDL_REG_SIZE_SHORT:
			npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT;
			regVal &= 0xffff;
			break;
		default:
			return EINVAL;
		}
		/* fill dest operand field of inst with dest reg addr */
		npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);

		/* fill src operand field of inst with least-sig 5 bits of val*/
		npeInstruction |=
		    ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) <<
		    IX_NPEDL_OFFSET_INSTR_SRC);

		/* fill coprocessor field of inst with most-sig 11 bits of val*/
		npeInstruction |=
		    ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) <<
		    IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA);

		/* step execution of NPE instruction using Debug ECS */
		error = npe_cpu_step(sc, npeInstruction,
		    ctxtNum, IX_NPEDL_WR_INSTR_LDUR);
	}
	if (error != 0) {
		DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u "
		    "writing reg\n", __func__, regAddr, regVal, regSize,
		    ctxtNum, error);
		return error;
	}
	if (verify) {
		uint32_t retRegVal;

		/* read the register back and compare with what we wrote */
		error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum,
		    &retRegVal);
		if (error == 0 && regVal != retRegVal)
			error = EIO;	/* XXX ambiguous */
	}
	return error;
}
1328
/*
 * There are 32 physical registers used in an NPE.  These are
 * treated as 16 pairs of 32-bit registers.  To write one of the pair,
 * write the pair number (0-15) to the REGMAP for Context 0.  Then write
 * the value to register 0 or 4 in the regfile, depending on which
 * register of the pair is to be written.
 */
1336 static int
1337 npe_physical_reg_write(struct ixpnpe_softc *sc,
1338 uint32_t regAddr, uint32_t regValue, int verify)
1339 {
1340 int error;
1341
1342 /*
1343 * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair
1344 * (0-16) of physical registers to write .
1345 */
1346 error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1347 (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP),
1348 IX_NPEDL_REG_SIZE_SHORT, 0, verify);
1349 if (error == 0) {
1350 /* regAddr = 0 or 4 */
1351 regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) *
1352 sizeof(uint32_t);
1353 error = npe_logical_reg_write(sc, regAddr, regValue,
1354 IX_NPEDL_REG_SIZE_WORD, 0, verify);
1355 }
1356 return error;
1357 }
1358
1359 static int
1360 npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum,
1361 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify)
1362 {
1363 DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n",
1364 __func__, ctxtNum, ctxtReg, ctxtRegVal);
1365 /*
1366 * Context 0 has no STARTPC. Instead, this value is used to set
1367 * NextPC for Background ECS, to set where NPE starts executing code
1368 */
1369 if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) {
1370 /* read BG_CTXT_REG_0, update NEXTPC bits, & write back to reg*/
1371 uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0);
1372 v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1373 v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) &
1374 IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1375
1376 npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v);
1377 return 0;
1378 } else {
1379 static const struct {
1380 uint32_t regAddress;
1381 uint32_t regSize;
1382 } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = {
1383 { IX_NPEDL_CTXT_REG_ADDR_STEVT,
1384 IX_NPEDL_REG_SIZE_BYTE },
1385 { IX_NPEDL_CTXT_REG_ADDR_STARTPC,
1386 IX_NPEDL_REG_SIZE_SHORT },
1387 { IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1388 IX_NPEDL_REG_SIZE_SHORT },
1389 { IX_NPEDL_CTXT_REG_ADDR_CINDEX,
1390 IX_NPEDL_REG_SIZE_BYTE }
1391 };
1392 return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress,
1393 ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify);
1394 }
1395 }
1396
1397 /*
1398 * NPE Mailbox support.
1399 */
1400 #define IX_NPEMH_MAXTRIES 100000
1401
1402 static int
1403 ofifo_wait(struct ixpnpe_softc *sc)
1404 {
1405 int i;
1406
1407 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1408 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE)
1409 return 1;
1410 DELAY(10);
1411 }
1412 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1413 __func__, npe_reg_read(sc, IX_NPESTAT));
1414 return 0;
1415 }
1416
1417 static int
1418 getmsg(struct ixpnpe_softc *sc, uint32_t msg[2])
1419 {
1420 mtx_assert(&sc->sc_mtx, MA_OWNED);
1421
1422 if (!ofifo_wait(sc))
1423 return EAGAIN;
1424 msg[0] = npe_reg_read(sc, IX_NPEFIFO);
1425 DPRINTF(sc->sc_dev, "%s: msg0 0x%x\n", __func__, msg[0]);
1426 if (!ofifo_wait(sc))
1427 return EAGAIN;
1428 msg[1] = npe_reg_read(sc, IX_NPEFIFO);
1429 DPRINTF(sc->sc_dev, "%s: msg1 0x%x\n", __func__, msg[1]);
1430 return 0;
1431 }
1432
1433 static void
1434 ixpnpe_intr(void *arg)
1435 {
1436 struct ixpnpe_softc *sc = arg;
1437 uint32_t status;
1438
1439 mtx_lock(&sc->sc_mtx);
1440 status = npe_reg_read(sc, IX_NPESTAT);
1441 DPRINTF(sc->sc_dev, "%s: status 0x%x\n", __func__, status);
1442 if ((status & IX_NPESTAT_OFINT) == 0) {
1443 /* NB: should not happen */
1444 device_printf(sc->sc_dev, "%s: status 0x%x\n",
1445 __func__, status);
1446 /* XXX must silence interrupt? */
1447 mtx_unlock(&sc->sc_mtx);
1448 return;
1449 }
1450 /*
1451 * A message is waiting in the output FIFO, copy it so
1452 * the interrupt will be silenced.
1453 */
1454 if (getmsg(sc, sc->sc_msg) == 0)
1455 sc->sc_msgwaiting = 1;
1456 mtx_unlock(&sc->sc_mtx);
1457 }
1458
1459 static int
1460 ififo_wait(struct ixpnpe_softc *sc)
1461 {
1462 int i;
1463
1464 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1465 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF)
1466 return 1;
1467 DELAY(10);
1468 }
1469 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1470 __func__, npe_reg_read(sc, IX_NPESTAT));
1471 return 0;
1472 }
1473
1474 static int
1475 putmsg(struct ixpnpe_softc *sc, const uint32_t msg[2])
1476 {
1477 mtx_assert(&sc->sc_mtx, MA_OWNED);
1478
1479 DPRINTF(sc->sc_dev, "%s: msg 0x%x:0x%x\n", __func__, msg[0], msg[1]);
1480 if (!ififo_wait(sc))
1481 return EIO;
1482 npe_reg_write(sc, IX_NPEFIFO, msg[0]);
1483 if (!ififo_wait(sc))
1484 return EIO;
1485 npe_reg_write(sc, IX_NPEFIFO, msg[1]);
1486
1487 return 0;
1488 }
1489
1490 /*
1491 * Send a msg to the NPE and wait for a reply. We spin as
1492 * we may be called early with interrupts not properly setup.
1493 */
1494 int
1495 ixpnpe_sendandrecvmsg_sync(struct ixpnpe_softc *sc,
1496 const uint32_t send[2], uint32_t recv[2])
1497 {
1498 int error;
1499
1500 mtx_lock(&sc->sc_mtx);
1501 error = putmsg(sc, send);
1502 if (error == 0)
1503 error = getmsg(sc, recv);
1504 mtx_unlock(&sc->sc_mtx);
1505
1506 return error;
1507 }
1508
1509 /*
1510 * Send a msg to the NPE w/o waiting for a reply.
1511 */
1512 int
1513 ixpnpe_sendmsg_async(struct ixpnpe_softc *sc, const uint32_t msg[2])
1514 {
1515 int error;
1516
1517 mtx_lock(&sc->sc_mtx);
1518 error = putmsg(sc, msg);
1519 mtx_unlock(&sc->sc_mtx);
1520
1521 return error;
1522 }
1523
1524 static int
1525 recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2])
1526 {
1527 mtx_assert(&sc->sc_mtx, MA_OWNED);
1528
1529 DPRINTF(sc->sc_dev, "%s: msgwaiting %d\n", __func__, sc->sc_msgwaiting);
1530 if (sc->sc_msgwaiting) {
1531 msg[0] = sc->sc_msg[0];
1532 msg[1] = sc->sc_msg[1];
1533 sc->sc_msgwaiting = 0;
1534 return 0;
1535 }
1536 return EAGAIN;
1537 }
1538
1539 /*
1540 * Receive any msg previously received from the NPE. If nothing
1541 * is available we return EAGAIN and the caller is required to
1542 * do a synchronous receive or try again later.
1543 */
1544 int
1545 ixpnpe_recvmsg_async(struct ixpnpe_softc *sc, uint32_t msg[2])
1546 {
1547 int error;
1548
1549 mtx_lock(&sc->sc_mtx);
1550 error = recvmsg_locked(sc, msg);
1551 mtx_unlock(&sc->sc_mtx);
1552
1553 return error;
1554 }
1555
1556 /*
1557 * Receive a msg from the NPE. If one was received asynchronously
1558 * then it's returned; otherwise we poll synchronously.
1559 */
1560 int
1561 ixpnpe_recvmsg_sync(struct ixpnpe_softc *sc, uint32_t msg[2])
1562 {
1563 int error;
1564
1565 mtx_lock(&sc->sc_mtx);
1566 error = recvmsg_locked(sc, msg);
1567 if (error == EAGAIN)
1568 error = getmsg(sc, msg);
1569 mtx_unlock(&sc->sc_mtx);
1570
1571 return error;
1572 }
Cache object: 5374b61f08cd09b78e03d3fd8d224d38
|