1 /*-
2 * Copyright (c) 2015 Brian Fundakowski Feldman. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD: releng/11.2/sys/dev/spibus/spigen.c 332942 2018-04-24 17:00:08Z ian $");
27
28 #include "opt_platform.h"
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/conf.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mman.h>
38 #include <sys/mutex.h>
39 #include <sys/module.h>
40 #include <sys/proc.h>
41 #include <sys/rwlock.h>
42 #include <sys/spigenio.h>
43 #include <sys/sysctl.h>
44 #include <sys/types.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_extern.h>
48 #include <vm/vm_object.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_pager.h>
51
52 #include <dev/spibus/spi.h>
53 #include <dev/spibus/spibusvar.h>
54
55 #ifdef FDT
56 #include <dev/ofw/ofw_bus_subr.h>
57 #endif
58
59 #include "spibus_if.h"
60
/* sc_flags bits. */
#define SPIGEN_OPEN (1 << 0)		/* device node held open (exclusive-open) */
#define SPIGEN_MMAP_BUSY (1 << 1)	/* an mmapped transfer is in flight */

/* Per-device state; mutable fields are protected by sc_mtx. */
struct spigen_softc {
	device_t sc_dev;		/* our newbus device */
	struct cdev *sc_cdev;		/* /dev/spigenN node */
	struct mtx sc_mtx;		/* protects flags, limits, mmap state */
	uint32_t sc_command_length_max; /* cannot change while mmapped */
	uint32_t sc_data_length_max; /* cannot change while mmapped */
	vm_object_t sc_mmap_buffer; /* command, then data */
	vm_offset_t sc_mmap_kvaddr;	/* kernel mapping of sc_mmap_buffer */
	size_t sc_mmap_buffer_size;	/* mapped size in bytes (page-rounded) */
	int sc_debug;			/* debug flag word, set via sysctl */
	int sc_flags;			/* SPIGEN_* bits above */
};
76
77 static int
78 spigen_probe(device_t dev)
79 {
80 int rv;
81
82 /*
83 * By default we only bid to attach if specifically added by our parent
84 * (usually via hint.spigen.#.at=busname). On FDT systems we bid as the
85 * default driver based on being configured in the FDT data.
86 */
87 rv = BUS_PROBE_NOWILDCARD;
88
89 #ifdef FDT
90 if (ofw_bus_status_okay(dev) &&
91 ofw_bus_is_compatible(dev, "freebsd,spigen"))
92 rv = BUS_PROBE_DEFAULT;
93 #endif
94
95 device_set_desc(dev, "SPI Generic IO");
96
97 return (rv);
98 }
99
/* Character-device entry points (defined below). */
static int spigen_open(struct cdev *, int, int, struct thread *);
static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
static int spigen_close(struct cdev *, int, int, struct thread *);
static d_mmap_single_t spigen_mmap_single;

/* Switch table for /dev/spigenN. */
static struct cdevsw spigen_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "spigen",
	.d_open = spigen_open,
	.d_ioctl = spigen_ioctl,
	.d_mmap_single = spigen_mmap_single,
	.d_close = spigen_close
};
113
114 static int
115 spigen_command_length_max_proc(SYSCTL_HANDLER_ARGS)
116 {
117 struct spigen_softc *sc = (struct spigen_softc *)arg1;
118 uint32_t command_length_max;
119 int error;
120
121 mtx_lock(&sc->sc_mtx);
122 command_length_max = sc->sc_command_length_max;
123 mtx_unlock(&sc->sc_mtx);
124 error = sysctl_handle_int(oidp, &command_length_max,
125 sizeof(command_length_max), req);
126 if (error == 0 && req->newptr != NULL) {
127 mtx_lock(&sc->sc_mtx);
128 if (sc->sc_mmap_buffer != NULL)
129 error = EBUSY;
130 else
131 sc->sc_command_length_max = command_length_max;
132 mtx_unlock(&sc->sc_mtx);
133 }
134 return (error);
135 }
136
137 static int
138 spigen_data_length_max_proc(SYSCTL_HANDLER_ARGS)
139 {
140 struct spigen_softc *sc = (struct spigen_softc *)arg1;
141 uint32_t data_length_max;
142 int error;
143
144 mtx_lock(&sc->sc_mtx);
145 data_length_max = sc->sc_data_length_max;
146 mtx_unlock(&sc->sc_mtx);
147 error = sysctl_handle_int(oidp, &data_length_max,
148 sizeof(data_length_max), req);
149 if (error == 0 && req->newptr != NULL) {
150 mtx_lock(&sc->sc_mtx);
151 if (sc->sc_mmap_buffer != NULL)
152 error = EBUSY;
153 else
154 sc->sc_data_length_max = data_length_max;
155 mtx_unlock(&sc->sc_mtx);
156 }
157 return (error);
158 }
159
160 static void
161 spigen_sysctl_init(struct spigen_softc *sc)
162 {
163 struct sysctl_ctx_list *ctx;
164 struct sysctl_oid *tree_node;
165 struct sysctl_oid_list *tree;
166
167 /*
168 * Add system sysctl tree/handlers.
169 */
170 ctx = device_get_sysctl_ctx(sc->sc_dev);
171 tree_node = device_get_sysctl_tree(sc->sc_dev);
172 tree = SYSCTL_CHILDREN(tree_node);
173 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "command_length_max",
174 CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
175 spigen_command_length_max_proc, "IU", "SPI command header portion (octets)");
176 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "data_length_max",
177 CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
178 spigen_data_length_max_proc, "IU", "SPI data trailer portion (octets)");
179 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "data", CTLFLAG_RW,
180 &sc->sc_debug, 0, "debug flags");
181
182 }
183
184 static int
185 spigen_attach(device_t dev)
186 {
187 struct spigen_softc *sc;
188 const int unit = device_get_unit(dev);
189
190 sc = device_get_softc(dev);
191 sc->sc_dev = dev;
192 sc->sc_cdev = make_dev(&spigen_cdevsw, unit,
193 UID_ROOT, GID_OPERATOR, 0660, "spigen%d", unit);
194 sc->sc_cdev->si_drv1 = dev;
195 sc->sc_command_length_max = PAGE_SIZE;
196 sc->sc_data_length_max = PAGE_SIZE;
197 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
198 spigen_sysctl_init(sc);
199
200 return (0);
201 }
202
203 static int
204 spigen_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
205 {
206 int error;
207 device_t dev;
208 struct spigen_softc *sc;
209
210 error = 0;
211 dev = cdev->si_drv1;
212 sc = device_get_softc(dev);
213
214 mtx_lock(&sc->sc_mtx);
215 if (sc->sc_flags & SPIGEN_OPEN)
216 error = EBUSY;
217 else
218 sc->sc_flags |= SPIGEN_OPEN;
219 mtx_unlock(&sc->sc_mtx);
220
221 return (error);
222 }
223
224 static int
225 spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
226 {
227 struct spi_command transfer = SPI_COMMAND_INITIALIZER;
228 device_t dev = cdev->si_drv1;
229 struct spigen_softc *sc = device_get_softc(dev);
230 int error = 0;
231
232 mtx_lock(&sc->sc_mtx);
233 if (st->st_command.iov_len == 0)
234 error = EINVAL;
235 else if (st->st_command.iov_len > sc->sc_command_length_max ||
236 st->st_data.iov_len > sc->sc_data_length_max)
237 error = ENOMEM;
238 mtx_unlock(&sc->sc_mtx);
239 if (error)
240 return (error);
241
242 #if 0
243 device_printf(dev, "cmd %p %u data %p %u\n", st->st_command.iov_base,
244 st->st_command.iov_len, st->st_data.iov_base, st->st_data.iov_len);
245 #endif
246 transfer.tx_cmd = transfer.rx_cmd = malloc(st->st_command.iov_len,
247 M_DEVBUF, M_WAITOK);
248 if (st->st_data.iov_len > 0) {
249 transfer.tx_data = transfer.rx_data = malloc(st->st_data.iov_len,
250 M_DEVBUF, M_WAITOK);
251 }
252 else
253 transfer.tx_data = transfer.rx_data = NULL;
254
255 error = copyin(st->st_command.iov_base, transfer.tx_cmd,
256 transfer.tx_cmd_sz = transfer.rx_cmd_sz = st->st_command.iov_len);
257 if ((error == 0) && (st->st_data.iov_len > 0))
258 error = copyin(st->st_data.iov_base, transfer.tx_data,
259 transfer.tx_data_sz = transfer.rx_data_sz =
260 st->st_data.iov_len);
261 if (error == 0)
262 error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
263 if (error == 0) {
264 error = copyout(transfer.rx_cmd, st->st_command.iov_base,
265 transfer.rx_cmd_sz);
266 if ((error == 0) && (st->st_data.iov_len > 0))
267 error = copyout(transfer.rx_data, st->st_data.iov_base,
268 transfer.rx_data_sz);
269 }
270
271 free(transfer.tx_cmd, M_DEVBUF);
272 free(transfer.tx_data, M_DEVBUF);
273 return (error);
274 }
275
276 static int
277 spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
278 {
279 struct spi_command transfer = SPI_COMMAND_INITIALIZER;
280 device_t dev = cdev->si_drv1;
281 struct spigen_softc *sc = device_get_softc(dev);
282 int error = 0;
283
284 mtx_lock(&sc->sc_mtx);
285 if (sc->sc_flags & SPIGEN_MMAP_BUSY)
286 error = EBUSY;
287 else if (stm->stm_command_length > sc->sc_command_length_max ||
288 stm->stm_data_length > sc->sc_data_length_max)
289 error = E2BIG;
290 else if (sc->sc_mmap_buffer == NULL)
291 error = EINVAL;
292 else if (sc->sc_mmap_buffer_size <
293 stm->stm_command_length + stm->stm_data_length)
294 error = ENOMEM;
295 if (error == 0)
296 sc->sc_flags |= SPIGEN_MMAP_BUSY;
297 mtx_unlock(&sc->sc_mtx);
298 if (error)
299 return (error);
300
301 transfer.tx_cmd = transfer.rx_cmd = (void *)sc->sc_mmap_kvaddr;
302 transfer.tx_cmd_sz = transfer.rx_cmd_sz = stm->stm_command_length;
303 transfer.tx_data = transfer.rx_data =
304 (void *)(sc->sc_mmap_kvaddr + stm->stm_command_length);
305 transfer.tx_data_sz = transfer.rx_data_sz = stm->stm_data_length;
306 error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
307
308 mtx_lock(&sc->sc_mtx);
309 KASSERT((sc->sc_flags & SPIGEN_MMAP_BUSY), ("mmap no longer marked busy"));
310 sc->sc_flags &= ~(SPIGEN_MMAP_BUSY);
311 mtx_unlock(&sc->sc_mtx);
312 return (error);
313 }
314
/*
 * Ioctl dispatcher: transfers are delegated to the helpers above; clock
 * and mode get/set are forwarded to the spibus layer for this device.
 * Unknown commands get ENOTTY per convention.
 */
static int
spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	device_t dev = cdev->si_drv1;
	int error;

	switch (cmd) {
	case SPIGENIOC_TRANSFER:
		/* One-shot transfer through kernel bounce buffers. */
		error = spigen_transfer(cdev, (struct spigen_transfer *)data);
		break;
	case SPIGENIOC_TRANSFER_MMAPPED:
		/* Transfer using the shared buffer set up via mmap(2). */
		error = spigen_transfer_mmapped(cdev, (struct spigen_transfer_mmapped *)data);
		break;
	case SPIGENIOC_GET_CLOCK_SPEED:
		error = spibus_get_clock(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_CLOCK_SPEED:
		error = spibus_set_clock(dev, *(uint32_t *)data);
		break;
	case SPIGENIOC_GET_SPI_MODE:
		error = spibus_get_mode(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_SPI_MODE:
		error = spibus_set_mode(dev, *(uint32_t *)data);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
347
/*
 * d_mmap_single handler: create the shared transfer buffer backing
 * SPIGENIOC_TRANSFER_MMAPPED.  The buffer (command bytes, then data
 * bytes) is built from wired OBJT_PHYS pages shared with userland and is
 * simultaneously mapped into the kernel at sc_mmap_kvaddr for the SPI
 * transfer path.  The user mapping must be exactly read+write.
 */
static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);
	vm_page_t *m;
	size_t n, pages;

	/* Require read+write and reject executable mappings outright. */
	if (size == 0 ||
	    (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
	    != (PROT_READ | PROT_WRITE))
		return (EINVAL);
	size = roundup2(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_mmap_buffer != NULL) {
		/* Only one mapping per device at a time. */
		mtx_unlock(&sc->sc_mtx);
		return (EBUSY);
	} else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
		mtx_unlock(&sc->sc_mtx);
		return (E2BIG);
	}
	sc->sc_mmap_buffer_size = size;
	*offset = 0;
	/*
	 * NOTE(review): vm_pager_allocate(), malloc(M_WAITOK), kva_alloc()
	 * and vm_page_grab() below may all sleep, yet they run with the
	 * MTX_DEF sc_mtx held — mutex(9) forbids sleeping under a default
	 * mutex.  This should be restructured (allocate first, then lock
	 * and publish); confirm with WITNESS.
	 */
	sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
	    nprot, *offset, curthread->td_ucred);
	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(*object);
	vm_object_reference_locked(*object); /* kernel and userland both */
	for (n = 0; n < pages; n++) {
		/* Pre-populate with zeroed, wired pages and mark them valid. */
		m[n] = vm_page_grab(*object, n,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		m[n]->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(*object);
	/* Map the same pages into the kernel for the transfer path. */
	sc->sc_mmap_kvaddr = kva_alloc(size);
	pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
	free(m, M_TEMP);
	mtx_unlock(&sc->sc_mtx);

	/*
	 * NOTE(review): this check comes too late — *object was already
	 * dereferenced above, so a NULL return would have panicked first.
	 */
	if (*object == NULL)
		return (EINVAL);
	return (0);
}
394
/*
 * Last-close handler: tear down the mmap transfer buffer, if one was
 * created, and clear the exclusive-open flag so the device can be
 * reopened.
 */
static int
spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_mmap_buffer != NULL) {
		/* Undo spigen_mmap_single(): unmap the kernel view first. */
		pmap_qremove(sc->sc_mmap_kvaddr,
		    sc->sc_mmap_buffer_size / PAGE_SIZE);
		kva_free(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size);
		sc->sc_mmap_kvaddr = 0;
		/*
		 * Drop the kernel-side reference taken at map time; the
		 * object lives on until userland's mapping is gone too.
		 * NOTE(review): vm_object_deallocate() is called with
		 * sc_mtx held — confirm it cannot sleep on this path.
		 */
		vm_object_deallocate(sc->sc_mmap_buffer);
		sc->sc_mmap_buffer = NULL;
		sc->sc_mmap_buffer_size = 0;
	}
	sc->sc_flags &= ~(SPIGEN_OPEN);
	mtx_unlock(&sc->sc_mtx);
	return (0);
}
415
416 static int
417 spigen_detach(device_t dev)
418 {
419 struct spigen_softc *sc;
420
421 sc = device_get_softc(dev);
422
423 mtx_lock(&sc->sc_mtx);
424 if (sc->sc_flags & SPIGEN_OPEN) {
425 mtx_unlock(&sc->sc_mtx);
426 return (EBUSY);
427 }
428 mtx_unlock(&sc->sc_mtx);
429
430 mtx_destroy(&sc->sc_mtx);
431
432 if (sc->sc_cdev)
433 destroy_dev(sc->sc_cdev);
434
435 return (0);
436 }
437
static devclass_t spigen_devclass;

/* Newbus method table: probe/attach/detach only; no bus methods. */
static device_method_t spigen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, spigen_probe),
	DEVMETHOD(device_attach, spigen_attach),
	DEVMETHOD(device_detach, spigen_detach),

	{ 0, 0 }
};

static driver_t spigen_driver = {
	"spigen",
	spigen_methods,
	sizeof(struct spigen_softc),
};

/* Register under spibus and record the module dependency on it. */
DRIVER_MODULE(spigen, spibus, spigen_driver, spigen_devclass, 0, 0);
MODULE_DEPEND(spigen, spibus, 1, 1, 1);