1 /*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 /*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 */
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 /*
63 * Intel XScale Network Processing Engine (NPE) support.
64 *
65 * Each NPE has an ixpnpeX device associated with it that is
66 * attached at boot. Depending on the microcode loaded into
67 * an NPE there may be an Ethernet interface (npeX) or some
68 * other network interface (e.g. for ATM). This file has support
69 * for loading microcode images and the associated NPE CPU
70 * manipulations (start, stop, reset).
71 *
72 * The code here basically replaces the npeDl and npeMh classes
73 * in the Intel Access Library (IAL).
74 *
75 * NB: Microcode images are loaded with firmware(9). To
76 * include microcode in a static kernel include the
77 * ixpnpe_fw device. Otherwise the firmware will be
78 * automatically loaded from the filesystem.
79 */
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/module.h>
85 #include <sys/time.h>
86 #include <sys/bus.h>
87 #include <sys/resource.h>
88 #include <sys/rman.h>
89 #include <sys/sysctl.h>
90
91 #include <sys/linker.h>
92 #include <sys/firmware.h>
93
94 #include <machine/bus.h>
95 #include <machine/cpu.h>
96 #include <machine/cpufunc.h>
97 #include <machine/resource.h>
98 #include <machine/intr.h>
99 #include <arm/xscale/ixp425/ixp425reg.h>
100 #include <arm/xscale/ixp425/ixp425var.h>
101
102 #include <arm/xscale/ixp425/ixp425_npereg.h>
103 #include <arm/xscale/ixp425/ixp425_npevar.h>
104
/*
 * Per-NPE software state.  One instance is allocated in ixpnpe_attach
 * for each ixpnpeX device (unit 0 => NPE-B, others => NPE-C).
 */
struct ixpnpe_softc {
    device_t sc_dev;
    bus_space_tag_t sc_iot;
    bus_space_handle_t sc_ioh;
    bus_size_t sc_size; /* size of mapped register window */
    struct resource *sc_irq; /* IRQ resource */
    void *sc_ih; /* interrupt handler */
    struct mtx sc_mtx; /* mailbox lock */
    uint32_t sc_msg[2]; /* reply msg collected in ixpnpe_intr */
    int sc_msgwaiting; /* sc_msg holds valid data */

    int validImage; /* valid ucode image loaded */
    int started; /* NPE is started */
    uint8_t functionalityId;/* ucode functionality ID */
    int insMemSize; /* size of instruction memory */
    int dataMemSize; /* size of data memory */
    /* state saved by npe_cpu_step_save and put back by npe_cpu_step_restore */
    uint32_t savedExecCount;
    uint32_t savedEcsDbgCtxtReg2;
};
124
#define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff

/* used to read download map from version in microcode image */
#define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000
#define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001
#define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002
#define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F

/*
 * masks used to extract address info from State information context
 * register addresses as read from microcode image
 */
#define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F
#define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0

/* LSB offset of Context Number field in State-Info Context Address */
#define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4

/* size (in words) of single State Information entry (ctxt reg address|data) */
#define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2

/* one download-map entry: block type + word offset of the block in the image */
typedef struct {
    uint32_t type;
    uint32_t offset;
} IxNpeDlNpeMgrDownloadMapBlockEntry;

typedef union {
    IxNpeDlNpeMgrDownloadMapBlockEntry block;
    uint32_t eodmMarker;    /* IX_NPEDL_END_OF_DOWNLOAD_MAP terminator */
} IxNpeDlNpeMgrDownloadMapEntry;

typedef struct {
    /* 1st entry in the download map (there may be more than one) */
    IxNpeDlNpeMgrDownloadMapEntry entry[1];
} IxNpeDlNpeMgrDownloadMap;

/* used to access an instruction or data block in a microcode image */
typedef struct {
    uint32_t npeMemAddress;
    uint32_t size;
    uint32_t data[1];   /* NB: actually `size' words in the image */
} IxNpeDlNpeMgrCodeBlock;

/* used to access each Context Reg entry state-information block */
typedef struct {
    uint32_t addressInfo;
    uint32_t value;
} IxNpeDlNpeMgrStateInfoCtxtRegEntry;

/* used to access a state-information block in a microcode image */
typedef struct {
    uint32_t size;
    IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1]; /* variable-length */
} IxNpeDlNpeMgrStateInfoBlock;
179
/* debug verbosity; settable via sysctl debug.ixp425npe or loader tunable */
static int npe_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RW, &npe_debug,
   0, "IXP425 NPE debug msgs");
TUNABLE_INT("debug.ixp425npe", &npe_debug);
#define DPRINTF(dev, fmt, ...) do { \
    if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFn(n, dev, fmt, ...) do { \
    if (npe_debug >= n) printf(fmt, __VA_ARGS__); \
} while (0)
190
/* Forward declarations of internal helpers; definitions follow below. */
static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t);
static int npe_isstopped(struct ixpnpe_softc *);
static int npe_load_ins(struct ixpnpe_softc *,
	const IxNpeDlNpeMgrCodeBlock *bp, int verify);
static int npe_load_data(struct ixpnpe_softc *,
	const IxNpeDlNpeMgrCodeBlock *bp, int verify);
static int npe_load_stateinfo(struct ixpnpe_softc *,
	const IxNpeDlNpeMgrStateInfoBlock *bp, int verify);
static int npe_load_image(struct ixpnpe_softc *,
	const uint32_t *imageCodePtr, int verify);
static int npe_cpu_reset(struct ixpnpe_softc *);
static int npe_cpu_start(struct ixpnpe_softc *);
static int npe_cpu_stop(struct ixpnpe_softc *);
static void npe_cmd_issue_write(struct ixpnpe_softc *,
	uint32_t cmd, uint32_t addr, uint32_t data);
static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *,
	uint32_t cmd, uint32_t addr);
static int npe_ins_write(struct ixpnpe_softc *,
	uint32_t addr, uint32_t data, int verify);
static int npe_data_write(struct ixpnpe_softc *,
	uint32_t addr, uint32_t data, int verify);
static void npe_ecs_reg_write(struct ixpnpe_softc *,
	uint32_t reg, uint32_t data);
static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg);
static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command);
static void npe_cpu_step_save(struct ixpnpe_softc *);
static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction,
	uint32_t ctxtNum, uint32_t ldur);
static void npe_cpu_step_restore(struct ixpnpe_softc *);
static int npe_logical_reg_read(struct ixpnpe_softc *,
	uint32_t regAddr, uint32_t regSize,
	uint32_t ctxtNum, uint32_t *regVal);
static int npe_logical_reg_write(struct ixpnpe_softc *,
	uint32_t regAddr, uint32_t regVal,
	uint32_t regSize, uint32_t ctxtNum, int verify);
static int npe_physical_reg_write(struct ixpnpe_softc *,
	uint32_t regAddr, uint32_t regValue, int verify);
static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum,
	uint32_t ctxtReg, uint32_t ctxtRegVal, int verify);

static void ixpnpe_intr(void *arg);
232
233 static uint32_t
234 npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off)
235 {
236 uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
237 DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v);
238 return v;
239 }
240
241 static void
242 npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val)
243 {
244 DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val);
245 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
246 }
247
/*
 * Allocate and initialize the software state for one NPE.
 * Maps the NPE register window, hooks up the message interrupt
 * and enables output-fifo interrupts.  Panics on resource failure
 * (called at attach time).  Unit 0 is NPE-B; any other unit NPE-C.
 */
struct ixpnpe_softc *
ixpnpe_attach(device_t dev)
{
    struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
    struct ixpnpe_softc *sc;
    bus_addr_t base;
    int rid, irq;

    /* XXX M_BUS */
    sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO);
    sc->sc_dev = dev;
    sc->sc_iot = sa->sc_iot;
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF);

    if (device_get_unit(dev) == 0) {
        base = IXP425_NPE_B_HWBASE;
        sc->sc_size = IXP425_NPE_B_SIZE;
        irq = IXP425_INT_NPE_B;

        /* size of instruction memory */
        sc->insMemSize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB;
        /* size of data memory */
        sc->dataMemSize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB;
    } else {
        base = IXP425_NPE_C_HWBASE;
        sc->sc_size = IXP425_NPE_C_SIZE;
        irq = IXP425_INT_NPE_C;

        /* size of instruction memory */
        sc->insMemSize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC;
        /* size of data memory */
        sc->dataMemSize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC;
    }
    if (bus_space_map(sc->sc_iot, base, sc->sc_size, 0, &sc->sc_ioh))
        panic("%s: Cannot map registers", device_get_name(dev));

    /*
     * Setup IRQ and handler for NPE message support.
     */
    rid = 0;
    sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
        irq, irq, 1, RF_ACTIVE);
    if (!sc->sc_irq)
        panic("%s: Unable to allocate irq %u", device_get_name(dev), irq);
    /* XXX could be a source of entropy */
    bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, ixpnpe_intr, sc, &sc->sc_ih);
    /* enable output fifo interrupts (NB: must also set OFIFO Write Enable) */
    npe_reg_write(sc, IX_NPECTL,
        npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE));

    return sc;
}
301
/*
 * Undo ixpnpe_attach: mask output-fifo interrupts, then tear down
 * the interrupt handler, register mapping, mutex, and free the softc.
 * NB: teardown order is the reverse of attach.
 */
void
ixpnpe_detach(struct ixpnpe_softc *sc)
{
    /* disable output fifo interrupts */
    npe_reg_write(sc, IX_NPECTL,
        npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE));

    bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih);
    bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
    mtx_destroy(&sc->sc_mtx);
    free(sc, M_TEMP);
}
314
315 int
316 ixpnpe_stopandreset(struct ixpnpe_softc *sc)
317 {
318 int error;
319
320 mtx_lock(&sc->sc_mtx);
321 error = npe_cpu_stop(sc); /* stop NPE */
322 if (error == 0)
323 error = npe_cpu_reset(sc); /* reset it */
324 if (error == 0)
325 sc->started = 0; /* mark stopped */
326 mtx_unlock(&sc->sc_mtx);
327
328 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
329 return error;
330 }
331
332 static int
333 ixpnpe_start_locked(struct ixpnpe_softc *sc)
334 {
335 int error;
336
337 if (!sc->started) {
338 error = npe_cpu_start(sc);
339 if (error == 0)
340 sc->started = 1;
341 } else
342 error = 0;
343
344 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
345 return error;
346 }
347
348 int
349 ixpnpe_start(struct ixpnpe_softc *sc)
350 {
351 int ret;
352
353 mtx_lock(&sc->sc_mtx);
354 ret = ixpnpe_start_locked(sc);
355 mtx_unlock(&sc->sc_mtx);
356 return (ret);
357 }
358
359 int
360 ixpnpe_stop(struct ixpnpe_softc *sc)
361 {
362 int error;
363
364 mtx_lock(&sc->sc_mtx);
365 error = npe_cpu_stop(sc);
366 if (error == 0)
367 sc->started = 0;
368 mtx_unlock(&sc->sc_mtx);
369
370 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
371 return error;
372 }
373
374 /*
375 * Indicates the start of an NPE Image, in new NPE Image Library format.
376 * 2 consecutive occurances indicates the end of the NPE Image Library
377 */
378 #define NPE_IMAGE_MARKER 0xfeedf00d
379
380 /*
381 * NPE Image Header definition, used in new NPE Image Library format
382 */
383 typedef struct {
384 uint32_t marker;
385 uint32_t id;
386 uint32_t size;
387 } IxNpeDlImageMgrImageHeader;
388
389 static int
390 npe_findimage(struct ixpnpe_softc *sc,
391 const uint32_t *imageLibrary, uint32_t imageId,
392 const uint32_t **imagePtr, uint32_t *imageSize)
393 {
394 const IxNpeDlImageMgrImageHeader *image;
395 uint32_t offset = 0;
396
397 while (imageLibrary[offset] == NPE_IMAGE_MARKER) {
398 image = (const IxNpeDlImageMgrImageHeader *)&imageLibrary[offset];
399 offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t);
400
401 DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n",
402 __func__, offset, image->marker, image->id, image->size);
403 if (image->id == imageId) {
404 *imagePtr = imageLibrary + offset;
405 *imageSize = image->size;
406 return 0;
407 }
408 /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */
409 if (image->id == NPE_IMAGE_MARKER) {
410 DPRINTF(sc->sc_dev,
411 "imageId 0x%08x not found in image library header\n", imageId);
412 /* reached end of library, image not found */
413 return ESRCH;
414 }
415 offset += image->size;
416 }
417 return ESRCH;
418 }
419
/*
 * Load microcode image `imageId' from firmware image `imageName'
 * into the NPE and start it.  The NPE is stopped and reset first;
 * on success validImage is set and the NPE is started.  Returns 0
 * or an errno (ENOENT if the firmware image is unavailable, ESRCH
 * if imageId is not in the library).
 */
int
ixpnpe_init(struct ixpnpe_softc *sc, const char *imageName, uint32_t imageId)
{
    uint32_t imageSize;
    const uint32_t *imageCodePtr;
    const struct firmware *fw;
    int error;

    DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId);

#if 0
    IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId);
    /*
     * Checking if image being loaded is meant for device that is running.
     * Image is forward compatible. i.e Image built for IXP42X should run
     * on IXP46X but not vice versa.
     */
    if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK))
        return EINVAL;
#endif
    error = ixpnpe_stopandreset(sc);        /* stop and reset the NPE */
    if (error != 0)
        return error;

    /* NB: may sleep waiting for the image to be loaded from the filesystem */
    fw = firmware_get(imageName);
    if (fw == NULL)
        return ENOENT;

    /* Locate desired image in files w/ combined images */
    error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize);
    if (error != 0)
        goto done;

    /*
     * If download was successful, store image Id in list of
     * currently loaded images. If a critical error occured
     * during download, record that the NPE has an invalid image
     */
    mtx_lock(&sc->sc_mtx);
    error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/);
    if (error == 0) {
        sc->validImage = 1;
        error = ixpnpe_start_locked(sc);
    } else {
        sc->validImage = 0;
    }
    /* NB: recorded even on failure; validImage gates its use */
    sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId);
    mtx_unlock(&sc->sc_mtx);
done:
    firmware_put(fw, FIRMWARE_UNLOAD);
    DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
    return error;
}
473
474 int
475 ixpnpe_getfunctionality(struct ixpnpe_softc *sc)
476 {
477 return (sc->validImage ? sc->functionalityId : 0);
478 }
479
480 static int
481 npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet)
482 {
483 uint32_t val;
484
485 val = npe_reg_read(sc, reg);
486 DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n",
487 __func__, reg, expectedBitsSet, val,
488 (val & expectedBitsSet) == expectedBitsSet);
489 return ((val & expectedBitsSet) == expectedBitsSet);
490 }
491
492 static int
493 npe_isstopped(struct ixpnpe_softc *sc)
494 {
495 return npe_checkbits(sc,
496 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP);
497 }
498
499 static int
500 npe_load_ins(struct ixpnpe_softc *sc,
501 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
502 {
503 uint32_t npeMemAddress;
504 int i, blockSize;
505
506 npeMemAddress = bp->npeMemAddress;
507 blockSize = bp->size; /* NB: instruction/data count */
508 if (npeMemAddress + blockSize > sc->insMemSize) {
509 device_printf(sc->sc_dev, "Block size too big for NPE memory\n");
510 return EINVAL; /* XXX */
511 }
512 for (i = 0; i < blockSize; i++, npeMemAddress++) {
513 if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
514 device_printf(sc->sc_dev, "NPE instruction write failed");
515 return EIO;
516 }
517 }
518 return 0;
519 }
520
521 static int
522 npe_load_data(struct ixpnpe_softc *sc,
523 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
524 {
525 uint32_t npeMemAddress;
526 int i, blockSize;
527
528 npeMemAddress = bp->npeMemAddress;
529 blockSize = bp->size; /* NB: instruction/data count */
530 if (npeMemAddress + blockSize > sc->dataMemSize) {
531 device_printf(sc->sc_dev, "Block size too big for NPE memory\n");
532 return EINVAL;
533 }
534 for (i = 0; i < blockSize; i++, npeMemAddress++) {
535 if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
536 device_printf(sc->sc_dev, "NPE data write failed\n");
537 return EIO;
538 }
539 }
540 return 0;
541 }
542
543 static int
544 npe_load_stateinfo(struct ixpnpe_softc *sc,
545 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify)
546 {
547 int i, nentries, error;
548
549 npe_cpu_step_save(sc);
550
551 /* for each state-info context register entry in block */
552 nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE;
553 error = 0;
554 for (i = 0; i < nentries; i++) {
555 /* each state-info entry is 2 words (address, value) in length */
556 uint32_t regVal = bp->ctxtRegEntry[i].value;
557 uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo;
558
559 uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG);
560 uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >>
561 IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM;
562
563 /* error-check Context Register No. and Context Number values */
564 if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) {
565 device_printf(sc->sc_dev, "invalid Context Register %u\n", reg);
566 error = EINVAL;
567 break;
568 }
569 if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) {
570 device_printf(sc->sc_dev, "invalid Context Number %u\n", cNum);
571 error = EINVAL;
572 break;
573 }
574 /* NOTE that there is no STEVT register for Context 0 */
575 if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) {
576 device_printf(sc->sc_dev, "no STEVT for Context 0\n");
577 error = EINVAL;
578 break;
579 }
580
581 if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) {
582 device_printf(sc->sc_dev, "write of state-info to NPE failed\n");
583 error = EIO;
584 break;
585 }
586 }
587
588 npe_cpu_step_restore(sc);
589 return error;
590 }
591
/*
 * Download a complete microcode image into a stopped NPE by walking
 * the image's download map and dispatching each block to the
 * appropriate loader (instruction, data, or state-info).  The map is
 * terminated by an IX_NPEDL_END_OF_DOWNLOAD_MAP marker.  Returns 0
 * or the first loader error; EIO if the NPE is running or a block
 * type is unknown.
 */
static int
npe_load_image(struct ixpnpe_softc *sc,
    const uint32_t *imageCodePtr, int verify)
{
#define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP)
    const IxNpeDlNpeMgrDownloadMap *downloadMap;
    int i, error;

    if (!npe_isstopped(sc)) {       /* verify NPE is stopped */
        device_printf(sc->sc_dev, "cannot load image, NPE not stopped\n");
        return EIO;
    }

    /*
     * Read Download Map, checking each block type and calling
     * appropriate function to perform download
     */
    error = 0;
    downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr;
    for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) {
        /* calculate pointer to block to be downloaded */
        const uint32_t *bp = imageCodePtr + downloadMap->entry[i].block.offset;
        switch (downloadMap->entry[i].block.type) {
        case IX_NPEDL_BLOCK_TYPE_INSTRUCTION:
            error = npe_load_ins(sc,
                (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: inst, error %d\n", __func__, error);
            break;
        case IX_NPEDL_BLOCK_TYPE_DATA:
            error = npe_load_data(sc,
                (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: data, error %d\n", __func__, error);
            break;
        case IX_NPEDL_BLOCK_TYPE_STATE:
            error = npe_load_stateinfo(sc,
                (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify);
            DPRINTF(sc->sc_dev, "%s: state, error %d\n", __func__, error);
            break;
        default:
            device_printf(sc->sc_dev,
                "unknown block type 0x%x in download map\n",
                downloadMap->entry[i].block.type);
            error = EIO;        /* XXX */
            break;
        }
        if (error != 0)
            break;
    }
    return error;
#undef EOM
}
643
/* contains Reset values for Execution Context Stack (ECS) registers */
static const struct {
    uint32_t regAddr;
    uint32_t regResetVal;
} ixNpeDlEcsRegResetValues[] = {
    { IX_NPEDL_ECS_BG_CTXT_REG_0,    IX_NPEDL_ECS_BG_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_BG_CTXT_REG_1,    IX_NPEDL_ECS_BG_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_BG_CTXT_REG_2,    IX_NPEDL_ECS_BG_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_0,   IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_1,   IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET },
    { IX_NPEDL_ECS_DBG_CTXT_REG_2,   IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET },
    { IX_NPEDL_ECS_INSTRUCT_REG,     IX_NPEDL_ECS_INSTRUCT_REG_RESET }
};

/*
 * Reset values for the per-context Context Store registers,
 * indexed by context-register number (STEVT, STARTPC, REGMAP, CINDEX).
 */
static const uint32_t ixNpeDlCtxtRegResetValues[] = {
    IX_NPEDL_CTXT_REG_RESET_STEVT,
    IX_NPEDL_CTXT_REG_RESET_STARTPC,
    IX_NPEDL_CTXT_REG_RESET_REGMAP,
    IX_NPEDL_CTXT_REG_RESET_CINDEX,
};

#define IX_NPEDL_RESET_NPE_PARITY 0x0800
#define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF
#define IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF
675
/*
 * Fully reset the NPE: drain its FIFOs, clear the mailbox, zero the
 * physical register file, reset all Context Store and ECS registers,
 * clear profile/watch counters, and pulse the feature-control parity
 * fuse to clear IMEM parity (WR IXA00055043).  Parity interrupts are
 * masked for the duration and the CTL register restored at the end.
 * The sequence is order-sensitive; do not rearrange.
 * Returns 0 or the first error encountered.
 */
static int
npe_cpu_reset(struct ixpnpe_softc *sc)
{
#define N(a) (sizeof(a) / sizeof(a[0]))
    struct ixp425_softc *sa = device_get_softc(device_get_parent(sc->sc_dev));
    uint32_t ctxtReg; /* identifies Context Store reg (0-3) */
    uint32_t regAddr;
    uint32_t regVal;
    uint32_t resetNpeParity;
    uint32_t ixNpeConfigCtrlRegVal;
    int i, error = 0;

    /* pre-store the NPE Config Control Register Value */
    ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL);
    ixNpeConfigCtrlRegVal |= 0x3F000000;

    /* disable the parity interrupt */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
        (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK));
    DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n",
        __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK);

    npe_cpu_step_save(sc);

    /*
     * Clear the FIFOs.
     */
    while (npe_checkbits(sc,
          IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) {
        /* read from the Watch-point FIFO until empty */
        (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO);
    }

    while (npe_checkbits(sc,
          IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) {
        /* read from the outFIFO until empty */
        (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO);
    }

    while (npe_checkbits(sc,
          IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) {
        /*
         * Step execution of the NPE intruction to read inFIFO using
         * the Debug Executing Context stack.
         */
        error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0);
        if (error != 0) {
            DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n",
                __func__, error);
            npe_cpu_step_restore(sc);
            return error;
        }
    }

    /*
     * Reset the mailbox reg
     */
    /* ...from XScale side */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST);
    /* ...from NPE side */
    error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0);
    if (error != 0) {
        DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n", __func__, error);
        npe_cpu_step_restore(sc);
        return error;
    }

    /*
     * Reset the physical registers in the NPE register file:
     * Note: no need to save/restore REGMAP for Context 0 here
     * since all Context Store regs are reset in subsequent code.
     */
    for (regAddr = 0;
         regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0;
         regAddr++) {
        /* for each physical register in the NPE reg file, write 0 : */
        error = npe_physical_reg_write(sc, regAddr, 0, TRUE);
        if (error != 0) {
            DPRINTF(sc->sc_dev, "%s: cannot write phy reg, error %u\n",
                __func__, error);
            npe_cpu_step_restore(sc);
            return error;       /* abort reset */
        }
    }

    /*
     * Reset the context store:
     */
    for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) {
        /* set each context's Context Store registers to reset values: */
        for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) {
            /* NOTE that there is no STEVT register for Context 0 */
            if (!(i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)) {
                regVal = ixNpeDlCtxtRegResetValues[ctxtReg];
                error = npe_ctx_reg_write(sc, i, ctxtReg, regVal, TRUE);
                if (error != 0) {
                    DPRINTF(sc->sc_dev, "%s: cannot write ctx reg, error %u\n",
                        __func__, error);
                    npe_cpu_step_restore(sc);
                    return error;       /* abort reset */
                }
            }
        }
    }

    npe_cpu_step_restore(sc);

    /* write Reset values to Execution Context Stack registers */
    for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++)
        npe_ecs_reg_write(sc,
            ixNpeDlEcsRegResetValues[i].regAddr,
            ixNpeDlEcsRegResetValues[i].regResetVal);

    /* clear the profile counter */
    npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);

    /* clear registers EXCT, AP0, AP1, AP2 and AP3 */
    for (regAddr = IX_NPEDL_REG_OFFSET_EXCT;
         regAddr <= IX_NPEDL_REG_OFFSET_AP3;
         regAddr += sizeof(uint32_t))
        npe_reg_write(sc, regAddr, 0);

    /* Reset the Watch-count register */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0);

    /*
     * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation
     */

    /*
     * Reset the NPE and its coprocessor - to reset internal
     * states and remove parity error. Note this makes no
     * sense based on the documentation. The feature control
     * register always reads back as 0 on the ixp425 and further
     * the bit definition of NPEA/NPEB is off by 1 according to
     * the Intel documention--so we're blindly following the
     * Intel code w/o any real understanding.
     */
    regVal = EXP_BUS_READ_4(sa, EXP_FCTRL_OFFSET);
    DPRINTFn(2, sc->sc_dev, "%s: FCTRL 0x%x\n", __func__, regVal);
    resetNpeParity =
        IX_NPEDL_RESET_NPE_PARITY << (1 + device_get_unit(sc->sc_dev));
    DPRINTFn(2, sc->sc_dev, "%s: FCTRL fuse parity, write 0x%x\n",
        __func__, regVal | resetNpeParity);
    EXP_BUS_WRITE_4(sa, EXP_FCTRL_OFFSET, regVal | resetNpeParity);

    /* un-fuse and un-reset the NPE & coprocessor */
    DPRINTFn(2, sc->sc_dev, "%s: FCTRL unfuse parity, write 0x%x\n",
        __func__, regVal & resetNpeParity);
    EXP_BUS_WRITE_4(sa, EXP_FCTRL_OFFSET, regVal &~ resetNpeParity);

    /*
     * Call NpeMgr function to stop the NPE again after the Feature Control
     * has unfused and Un-Reset the NPE and its associated Coprocessors.
     */
    error = npe_cpu_stop(sc);

    /* restore NPE configuration bus Control Register - Parity Settings */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
        (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK));
    DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n",
        __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL));

    return error;
#undef N
}
842
843 static int
844 npe_cpu_start(struct ixpnpe_softc *sc)
845 {
846 uint32_t ecsRegVal;
847
848 /*
849 * Ensure only Background Context Stack Level is Active by turning off
850 * the Active bit in each of the other Executing Context Stack levels.
851 */
852 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
853 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
854 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal);
855
856 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
857 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
858 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal);
859
860 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0);
861 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
862 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal);
863
864 /* clear the pipeline */
865 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
866
867 /* start NPE execution by issuing command through EXCTL register on NPE */
868 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START);
869
870 /*
871 * Check execution status of NPE to verify operation was successful.
872 */
873 return npe_checkbits(sc,
874 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 0 : EIO;
875 }
876
877 static int
878 npe_cpu_stop(struct ixpnpe_softc *sc)
879 {
880 /* stop NPE execution by issuing command through EXCTL register on NPE */
881 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP);
882
883 /* verify that NPE Stop was successful */
884 return npe_checkbits(sc,
885 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO;
886 }
887
#define IX_NPEDL_REG_SIZE_BYTE 8
#define IX_NPEDL_REG_SIZE_SHORT 16
#define IX_NPEDL_REG_SIZE_WORD 32

/*
 * Introduce extra read cycles after issuing read command to NPE
 * so that we read the register after the NPE has updated it
 * This is to overcome race condition between XScale and NPE
 */
#define IX_NPEDL_DELAY_READ_CYCLES 2
/*
 * To mask top three MSBs of 32bit word to download into NPE IMEM.
 * NB: the definition used to carry a stray trailing ';' which only
 * worked because every use was in statement position; removed so the
 * macro is also safe in expression context.
 */
#define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF
902
903 static void
904 npe_cmd_issue_write(struct ixpnpe_softc *sc,
905 uint32_t cmd, uint32_t addr, uint32_t data)
906 {
907 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data);
908 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
909 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
910 }
911
912 static uint32_t
913 npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr)
914 {
915 uint32_t data;
916 int i;
917
918 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
919 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
920 for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++)
921 data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
922 return data;
923 }
924
925 static int
926 npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
927 {
928 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
929 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data);
930 if (verify) {
931 uint32_t rdata;
932
933 /*
934 * Write invalid data to this reg, so we can see if we're reading
935 * the EXDATA register too early.
936 */
937 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
938
939 /* Disabled since top 3 MSB are not used for Azusa hardware Refer WR:IXA00053900*/
940 data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
941
942 rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM, addr);
943 rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
944
945 if (data != rdata)
946 return EIO;
947 }
948 return 0;
949 }
950
951 static int
952 npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
953 {
954 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
955 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data);
956 if (verify) {
957 /*
958 * Write invalid data to this reg, so we can see if we're reading
959 * the EXDATA register too early.
960 */
961 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
962 if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr))
963 return EIO;
964 }
965 return 0;
966 }
967
/*
 * Write an NPE Execution Context Stack (ECS) register via the
 * EXCTL command interface.
 */
static void
npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data)
{
    npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data);
}
973
/*
 * Read an NPE Execution Context Stack (ECS) register via the
 * EXCTL command interface.
 */
static uint32_t
npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg)
{
    return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg);
}
979
/*
 * Issue a bare command (no address/data operands) to the NPE by
 * writing it to the Execution Control register.
 */
static void
npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command)
{
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command);
}
985
/*
 * Prepare the NPE for single-step execution.
 *
 * Saves the Execution Count register and debug-level ECS register 2
 * into the softc (restored later by npe_cpu_step_restore), clears
 * the halt condition, and temporarily forces the IF/IE bits on so a
 * subsequent npe_cpu_step terminates instead of stepping forever.
 */
static void
npe_cpu_step_save(struct ixpnpe_softc *sc)
{
    /* turn off the halt bit by clearing Execution Count register. */
    /* save reg contents 1st and restore later */
    sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT);
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0);

    /* ensure that IF and IE are on (temporarily), so that we don't end up
     * stepping forever */
    sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2);

    npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2,
        (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF |
         IX_NPEDL_MASK_ECS_DBG_REG_2_IE));
}
1002
/*
 * Execute a single NPE instruction in the given context using the
 * Debug Executing Context Stack.
 *
 * npeInstruction: encoded instruction word to execute
 * ctxtNum: context number whose register store the instruction uses
 * ldur: LDUR field value for debug ECS level register 0
 *
 * Completion is detected by watching the Watch Count register, which
 * increments when the NPE finishes an instruction.  Returns 0 when
 * the count advances, EIO if it never changes within the retry budget.
 * NOTE(review): callers appear expected to bracket this with
 * npe_cpu_step_save/npe_cpu_step_restore — confirm against callers.
 */
static int
npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction,
    uint32_t ctxtNum, uint32_t ldur)
{
#define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000
    uint32_t ecsDbgRegVal;
    uint32_t oldWatchcount, newWatchcount;
    int tries;

    /* set the Active bit, and the LDUR, in the debug level */
    ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
        (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR);

    npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal);

    /*
     * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the
     * instruction, and set SELCTXT at ECS DEBUG Level to specify which context
     * store to access.
     * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
     */
    ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) |
        (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT);

    npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal);

    /* clear the pipeline */
    npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);

    /* load NPE instruction into the instruction register */
    npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction);

    /* we need this value later to wait for completion of NPE execution step */
    oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);

    /* issue a Step One command via the Execution Control register */
    npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP);

    /*
     * Force the XScale to wait until the NPE has finished execution step
     * NOTE that this delay will be very small, just long enough to allow a
     * single NPE instruction to complete execution; if instruction execution
     * is not completed before timeout retries, exit the while loop.
     */
    newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
    for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES &&
        newWatchcount == oldWatchcount; tries++) {
        /* Watch Count register increments when NPE completes an instruction */
        newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
    }
    return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO;
#undef IX_NPE_DL_MAX_NUM_OF_RETRIES
}
1056
/*
 * Undo npe_cpu_step_save: deactivate the debug level, flush the
 * pipeline, and restore the Execution Count register and the IF/IE
 * bits in debug ECS register 2 from the values saved in the softc.
 */
static void
npe_cpu_step_restore(struct ixpnpe_softc *sc)
{
    /* clear active bit in debug level */
    npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0);

    /* clear the pipeline */
    npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);

    /* restore Execution Count register contents. */
    npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount);

    /* restore IF and IE bits to original values */
    npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2);
}
1072
1073 static int
1074 npe_logical_reg_read(struct ixpnpe_softc *sc,
1075 uint32_t regAddr, uint32_t regSize,
1076 uint32_t ctxtNum, uint32_t *regVal)
1077 {
1078 uint32_t npeInstruction, mask;
1079 int error;
1080
1081 switch (regSize) {
1082 case IX_NPEDL_REG_SIZE_BYTE:
1083 npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE;
1084 mask = 0xff;
1085 break;
1086 case IX_NPEDL_REG_SIZE_SHORT:
1087 npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT;
1088 mask = 0xffff;
1089 break;
1090 case IX_NPEDL_REG_SIZE_WORD:
1091 npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD;
1092 mask = 0xffffffff;
1093 break;
1094 default:
1095 return EINVAL;
1096 }
1097
1098 /* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */
1099 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) |
1100 (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1101
1102 /* step execution of NPE intruction using Debug Executing Context stack */
1103 error = npe_cpu_step(sc, npeInstruction, ctxtNum, IX_NPEDL_RD_INSTR_LDUR);
1104 if (error != 0) {
1105 DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n",
1106 __func__, regAddr, regSize, ctxtNum, error);
1107 return error;
1108 }
1109 /* read value of register from Execution Data register */
1110 *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1111
1112 /* align value from left to right */
1113 *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask;
1114
1115 return 0;
1116 }
1117
/*
 * Write a logical NPE register of the given width (8/16/32 bits) in
 * the given context's register store, optionally reading it back to
 * verify.
 *
 * A 32-bit write is decomposed into two recursive 16-bit writes
 * (upper half first) because the write instruction encodings only
 * carry byte/short immediates.  For byte/short writes the value is
 * split across the instruction's SRC and coprocessor fields and the
 * instruction is single-stepped through the Debug ECS.
 *
 * Returns 0 on success, EINVAL for a bad regSize, EIO on verify
 * mismatch (XXX ambiguous with a step failure), or the error from
 * npe_cpu_step.
 */
static int
npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal,
    uint32_t regSize, uint32_t ctxtNum, int verify)
{
    int error;

    DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n",
        __func__, regAddr, regVal, regSize, ctxtNum);
    if (regSize == IX_NPEDL_REG_SIZE_WORD) {
        /* NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3| */
        /* Write upper half-word (short) to |d0|d1| */
        error = npe_logical_reg_write(sc, regAddr,
            regVal >> IX_NPEDL_REG_SIZE_SHORT,
            IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
        if (error != 0)
            return error;

        /* Write lower half-word (short) to |d2|d3| */
        error = npe_logical_reg_write(sc,
            regAddr + sizeof(uint16_t),
            regVal & 0xffff,
            IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
    } else {
        uint32_t npeInstruction;

        switch (regSize) {
        case IX_NPEDL_REG_SIZE_BYTE:
            npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE;
            regVal &= 0xff;
            break;
        case IX_NPEDL_REG_SIZE_SHORT:
            npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT;
            regVal &= 0xffff;
            break;
        default:
            return EINVAL;
        }
        /* fill dest operand field of instruction with destination reg addr */
        npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);

        /* fill src operand field of instruction with least-sig 5 bits of val */
        npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) <<
            IX_NPEDL_OFFSET_INSTR_SRC);

        /* fill coprocessor field of instruction with most-sig 11 bits of val */
        npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) <<
            IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA);

        /* step execution of NPE instruction using Debug ECS */
        error = npe_cpu_step(sc, npeInstruction,
            ctxtNum, IX_NPEDL_WR_INSTR_LDUR);
    }
    if (error != 0) {
        DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u writing reg\n",
            __func__, regAddr, regVal, regSize, ctxtNum, error);
        return error;
    }
    if (verify) {
        uint32_t retRegVal;

        error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum, &retRegVal);
        if (error == 0 && regVal != retRegVal)
            error = EIO; /* XXX ambiguous */
    }
    return error;
}
1184
1185 /*
1186 * There are 32 physical registers used in an NPE. These are
1187 * treated as 16 pairs of 32-bit registers. To write one of the pair,
1188 * write the pair number (0-16) to the REGMAP for Context 0. Then write
1189 * the value to register 0 or 4 in the regfile, depending on which
1190 * register of the pair is to be written
1191 */
1192 static int
1193 npe_physical_reg_write(struct ixpnpe_softc *sc,
1194 uint32_t regAddr, uint32_t regValue, int verify)
1195 {
1196 int error;
1197
1198 /*
1199 * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair (0-16)
1200 * of physical registers to write .
1201 */
1202 error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1203 (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP),
1204 IX_NPEDL_REG_SIZE_SHORT, 0, verify);
1205 if (error == 0) {
1206 /* regAddr = 0 or 4 */
1207 regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) *
1208 sizeof(uint32_t);
1209 error = npe_logical_reg_write(sc, regAddr, regValue,
1210 IX_NPEDL_REG_SIZE_WORD, 0, verify);
1211 }
1212 return error;
1213 }
1214
1215 static int
1216 npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum,
1217 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify)
1218 {
1219 DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n",
1220 __func__, ctxtNum, ctxtReg, ctxtRegVal);
1221 /*
1222 * Context 0 has no STARTPC. Instead, this value is used to set
1223 * NextPC for Background ECS, to set where NPE starts executing code
1224 */
1225 if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) {
1226 /* read BG_CTXT_REG_0, update NEXTPC bits, and write back to reg */
1227 uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0);
1228 v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1229 v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) &
1230 IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1231
1232 npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v);
1233 return 0;
1234 } else {
1235 static const struct {
1236 uint32_t regAddress;
1237 uint32_t regSize;
1238 } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = {
1239 { IX_NPEDL_CTXT_REG_ADDR_STEVT, IX_NPEDL_REG_SIZE_BYTE },
1240 { IX_NPEDL_CTXT_REG_ADDR_STARTPC, IX_NPEDL_REG_SIZE_SHORT },
1241 { IX_NPEDL_CTXT_REG_ADDR_REGMAP, IX_NPEDL_REG_SIZE_SHORT },
1242 { IX_NPEDL_CTXT_REG_ADDR_CINDEX, IX_NPEDL_REG_SIZE_BYTE }
1243 };
1244 return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress,
1245 ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify);
1246 }
1247 }
1248
/*
 * NPE Mailbox support.
 */
/* Max polls of the mailbox status register before declaring a timeout. */
#define IX_NPEMH_MAXTRIES	100000
1253
1254 static int
1255 ixpnpe_ofifo_wait(struct ixpnpe_softc *sc)
1256 {
1257 int i;
1258
1259 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1260 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE)
1261 return 1;
1262 DELAY(10);
1263 }
1264 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1265 __func__, npe_reg_read(sc, IX_NPESTAT));
1266 return 0;
1267 }
1268
/*
 * NPE mailbox interrupt handler.
 *
 * Fires when the NPE raises its output-FIFO interrupt (OFINT).
 * Drains the two-word reply from the output FIFO into sc_msg (the
 * reads also silence the interrupt), sets sc_msgwaiting to 1 on a
 * successful fetch or leaves it at -1 if either FIFO read timed out,
 * then wakes one thread blocked in ixpnpe_recvmsg_locked.
 */
static void
ixpnpe_intr(void *arg)
{
    struct ixpnpe_softc *sc = arg;
    uint32_t status;

    status = npe_reg_read(sc, IX_NPESTAT);
    if ((status & IX_NPESTAT_OFINT) == 0) {
        /* NB: should not happen */
        device_printf(sc->sc_dev, "%s: status 0x%x\n", __func__, status);
        /* XXX must silence interrupt? */
        return;
    }
    /*
     * A message is waiting in the output FIFO, copy it so
     * the interrupt will be silenced; then signal anyone
     * waiting to collect the result.
     */
    sc->sc_msgwaiting = -1;		/* NB: error indicator */
    if (ixpnpe_ofifo_wait(sc)) {
        sc->sc_msg[0] = npe_reg_read(sc, IX_NPEFIFO);
        if (ixpnpe_ofifo_wait(sc)) {
            sc->sc_msg[1] = npe_reg_read(sc, IX_NPEFIFO);
            sc->sc_msgwaiting = 1;	/* successful fetch */
        }
    }
    wakeup_one(sc);
}
1297
1298 static int
1299 ixpnpe_ififo_wait(struct ixpnpe_softc *sc)
1300 {
1301 int i;
1302
1303 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1304 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF)
1305 return 1;
1306 DELAY(10);
1307 }
1308 return 0;
1309 }
1310
1311 static int
1312 ixpnpe_sendmsg_locked(struct ixpnpe_softc *sc, const uint32_t msg[2])
1313 {
1314 int error = 0;
1315
1316 mtx_assert(&sc->sc_mtx, MA_OWNED);
1317
1318 sc->sc_msgwaiting = 0;
1319 if (ixpnpe_ififo_wait(sc)) {
1320 npe_reg_write(sc, IX_NPEFIFO, msg[0]);
1321 if (ixpnpe_ififo_wait(sc))
1322 npe_reg_write(sc, IX_NPEFIFO, msg[1]);
1323 else
1324 error = EIO;
1325 } else
1326 error = EIO;
1327
1328 if (error)
1329 device_printf(sc->sc_dev, "input FIFO timeout, msg [0x%x,0x%x]\n",
1330 msg[0], msg[1]);
1331 return error;
1332 }
1333
1334 static int
1335 ixpnpe_recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2])
1336 {
1337 mtx_assert(&sc->sc_mtx, MA_OWNED);
1338
1339 if (!sc->sc_msgwaiting)
1340 msleep(sc, &sc->sc_mtx, 0, "npemh", 0);
1341 bcopy(sc->sc_msg, msg, sizeof(sc->sc_msg));
1342 /* NB: sc_msgwaiting != 1 means the ack fetch failed */
1343 return sc->sc_msgwaiting != 1 ? EIO : 0;
1344 }
1345
/*
 * Send a msg to the NPE and wait for a reply.  We use the
 * private mutex and sleep until an interrupt is received
 * signalling the availability of data in the output FIFO,
 * so the caller cannot be holding a mutex.  May be better to
 * piggyback on the caller's mutex instead but that would
 * make other locking confusing.
 *
 * Returns 0 with recv[] filled in, or EIO on a FIFO timeout
 * during send or a failed reply fetch.
 */
int
ixpnpe_sendandrecvmsg(struct ixpnpe_softc *sc,
    const uint32_t send[2], uint32_t recv[2])
{
    int error;

    mtx_lock(&sc->sc_mtx);
    error = ixpnpe_sendmsg_locked(sc, send);
    if (error == 0)
        error = ixpnpe_recvmsg_locked(sc, recv);
    mtx_unlock(&sc->sc_mtx);

    return error;
}
1368
1369 /* XXX temporary, not reliable */
1370
/*
 * Post a two-word message to the NPE without waiting for a reply;
 * just takes the private mutex around the FIFO writes.
 * Returns 0 on success, EIO on an input-FIFO timeout.
 */
int
ixpnpe_sendmsg(struct ixpnpe_softc *sc, const uint32_t msg[2])
{
    int error;

    mtx_lock(&sc->sc_mtx);
    error = ixpnpe_sendmsg_locked(sc, msg);
    mtx_unlock(&sc->sc_mtx);

    return error;
}
1382
/*
 * Collect a previously-received NPE reply, if any.
 *
 * Non-blocking: copies out sc_msg only when the interrupt handler
 * has already marked a message as waiting; returns EIO otherwise,
 * including when the handler's FIFO fetch failed (sc_msgwaiting
 * == -1, in which case msg[] is filled with the partial/stale data).
 */
int
ixpnpe_recvmsg(struct ixpnpe_softc *sc, uint32_t msg[2])
{
    int error;

    mtx_lock(&sc->sc_mtx);
    if (sc->sc_msgwaiting)
        bcopy(sc->sc_msg, msg, sizeof(sc->sc_msg));
    /* NB: sc_msgwaiting != 1 means the ack fetch failed */
    error = sc->sc_msgwaiting != 1 ? EIO : 0;
    mtx_unlock(&sc->sc_mtx);

    return error;
}
Cache object: e08d09c5a1b0a42224411a46482afe73
|