FreeBSD/Linux Kernel Cross Reference
sys/geom/part/g_part.c
1 /*-
2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/8.3/sys/geom/part/g_part.c 231345 2012-02-10 06:38:57Z ae $");
29
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/sysctl.h>
43 #include <sys/systm.h>
44 #include <sys/uuid.h>
45 #include <geom/geom.h>
46 #include <geom/geom_ctl.h>
47 #include <geom/geom_int.h>
48 #include <geom/part/g_part.h>
49
50 #include "g_part_if.h"
51
/*
 * _PATH_DEV is normally supplied by <paths.h>, which is a userland
 * header; provide the conventional "/dev/" fallback for kernel builds.
 */
#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif
55
/*
 * Empty kobj method table for the "null" scheme: no methods are
 * implemented, so every scheme operation resolves to the kobj default.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

/*
 * The "null" scheme is the placeholder attached to a geom that has no
 * (valid) partition table, e.g. after "gpart destroy" or before commit.
 */
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

/* Registry of all partitioning schemes currently known to the kernel. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
68
/*
 * Mapping between the user-visible partition type names (lexemes, as
 * accepted by gpart(8)) and the scheme-independent alias enumeration.
 * Looked up linearly by g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;		/* user-visible type name */
	enum g_part_alias alias;	/* scheme-independent alias */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
};
107
SYSCTL_DECL(_kern_geom);
/* sysctl tree: kern.geom.part */
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, "GEOM_PART stuff");
/*
 * When non-zero, a failed g_part_check_integrity() rejects the table
 * (EINVAL); otherwise the table is only marked corrupt. Settable as a
 * loader tunable and at runtime via sysctl.
 */
static u_int check_integrity = 0;
TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, CTLFLAG_RW,
    &check_integrity, 0, "Enable integrity checking");
114
/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

/*
 * Method table binding this class into the GEOM framework; the class
 * methods handle gctl requests and tasting, the geom methods handle
 * per-geom I/O and topology events.
 */
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);
154
155 const char *
156 g_part_alias_name(enum g_part_alias alias)
157 {
158 int i;
159
160 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
161 if (g_part_alias_list[i].alias != alias)
162 continue;
163 return (g_part_alias_list[i].lexeme);
164 }
165
166 return (NULL);
167 }
168
169 void
170 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
171 u_int *bestheads)
172 {
173 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
174 off_t chs, cylinders;
175 u_int heads;
176 int idx;
177
178 *bestchs = 0;
179 *bestheads = 0;
180 for (idx = 0; candidate_heads[idx] != 0; idx++) {
181 heads = candidate_heads[idx];
182 cylinders = blocks / heads / sectors;
183 if (cylinders < heads || cylinders < sectors)
184 break;
185 if (cylinders > 1023)
186 continue;
187 chs = cylinders * heads * sectors;
188 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
189 *bestchs = chs;
190 *bestheads = heads;
191 }
192 }
193 }
194
195 static void
196 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
197 off_t blocks)
198 {
199 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
200 off_t chs, bestchs;
201 u_int heads, sectors;
202 int idx;
203
204 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 ||
205 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
206 table->gpt_fixgeom = 0;
207 table->gpt_heads = 0;
208 table->gpt_sectors = 0;
209 bestchs = 0;
210 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
211 sectors = candidate_sectors[idx];
212 g_part_geometry_heads(blocks, sectors, &chs, &heads);
213 if (chs == 0)
214 continue;
215 /*
216 * Prefer a geometry with sectors > 1, but only if
217 * it doesn't bump down the numbver of heads to 1.
218 */
219 if (chs > bestchs || (chs == bestchs && heads > 1 &&
220 table->gpt_sectors == 1)) {
221 bestchs = chs;
222 table->gpt_heads = heads;
223 table->gpt_sectors = sectors;
224 }
225 }
226 /*
227 * If we didn't find a geometry at all, then the disk is
228 * too big. This means we can use the maximum number of
229 * heads and sectors.
230 */
231 if (bestchs == 0) {
232 table->gpt_heads = 255;
233 table->gpt_sectors = 63;
234 }
235 } else {
236 table->gpt_fixgeom = 1;
237 table->gpt_heads = heads;
238 table->gpt_sectors = sectors;
239 }
240 }
241
/*
 * Verbose-boot diagnostic printf used by g_part_check_integrity().
 * Wrapped in do/while(0) so it behaves as a single statement: the
 * original bare if-block form was unsafe in unbraced if/else contexts
 * (dangling-else hazard) even though current call sites are braced.
 */
#define	DPRINTF(...)	do {					\
	if (bootverbose) {					\
		printf("GEOM_PART: " __VA_ARGS__);		\
	}							\
} while (0)
245
/*
 * Sanity-check a partition table against its provider: table bounds
 * must lie within the media, and no two live entries may overlap.
 * Each violation is logged (when bootverbose) and counted. If any
 * check failed, behavior depends on the kern.geom.part.check_integrity
 * tunable: reject the table with EINVAL, or merely mark it corrupt so
 * a later "gpart recover" can repair it. Returns 0 on acceptance.
 */
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	/* Table-level bounds checks. */
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	/* Per-entry checks; deleted and internal entries don't count. */
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		/* Stripe-alignment check is advisory only. */
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d is not aligned on %u "
				    "bytes\n", e1->gpe_index, pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		/* Pairwise overlap checks against all later entries. */
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			/* e1 fully enclosing e2 is the remaining case. */
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF
358
359 struct g_part_entry *
360 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
361 quad_t end)
362 {
363 struct g_part_entry *entry, *last;
364
365 last = NULL;
366 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
367 if (entry->gpe_index == index)
368 break;
369 if (entry->gpe_index > index) {
370 entry = NULL;
371 break;
372 }
373 last = entry;
374 }
375 if (entry == NULL) {
376 entry = g_malloc(table->gpt_scheme->gps_entrysz,
377 M_WAITOK | M_ZERO);
378 entry->gpe_index = index;
379 if (last == NULL)
380 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
381 else
382 LIST_INSERT_AFTER(last, entry, gpe_entry);
383 } else
384 entry->gpe_offset = 0;
385 entry->gpe_start = start;
386 entry->gpe_end = end;
387 return (entry);
388 }
389
/*
 * Create (if needed) and configure the GEOM provider backing a table
 * entry, then bring it online. The provider inherits sector size and
 * delete capability from the underlying provider; its media size is
 * the entry's LBA span minus any extra alignment offset, and its
 * stripe offset is adjusted for the entry's position on the media.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* The entry's byte offset is at least its start LBA in bytes. */
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/* Scheme-specific naming, e.g. "ada0p1" or "ada0s1". */
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	/* Shrink by any alignment slack between offset and start LBA. */
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	g_error_provider(entry->gpe_pp, 0);
}
426
427 static struct g_geom*
428 g_part_find_geom(const char *name)
429 {
430 struct g_geom *gp;
431 LIST_FOREACH(gp, &g_part_class.geom, geom) {
432 if (!strcmp(name, gp->name))
433 break;
434 }
435 return (gp);
436 }
437
438 static int
439 g_part_parm_geom(const char *name, struct g_geom **v)
440 {
441 struct g_geom *gp;
442
443 if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
444 name += strlen(_PATH_DEV);
445 gp = g_part_find_geom(name);
446 if (gp == NULL)
447 return (EINVAL);
448 if ((gp->flags & G_GEOM_WITHER) != 0)
449 return (ENXIO);
450 *v = gp;
451 return (0);
452 }
453
454 static int
455 g_part_parm_provider(const char *name, struct g_provider **v)
456 {
457 struct g_provider *pp;
458
459 if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
460 name += strlen(_PATH_DEV);
461 pp = g_provider_by_name(name);
462 if (pp == NULL)
463 return (EINVAL);
464 *v = pp;
465 return (0);
466 }
467
468 static int
469 g_part_parm_quad(const char *p, quad_t *v)
470 {
471 char *x;
472 quad_t q;
473
474 q = strtoq(p, &x, 0);
475 if (*x != '\0' || q < 0)
476 return (EINVAL);
477 *v = q;
478 return (0);
479 }
480
481 static int
482 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
483 {
484 struct g_part_scheme *s;
485
486 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
487 if (s == &g_part_null_scheme)
488 continue;
489 if (!strcasecmp(s->name, p))
490 break;
491 }
492 if (s == NULL)
493 return (EINVAL);
494 *v = s;
495 return (0);
496 }
497
/*
 * Validate a string parameter: any non-empty string is accepted as-is
 * (no copy is made). Returns 0 and stores the pointer in *v, or
 * EINVAL for the empty string.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
507
508 static int
509 g_part_parm_uint(const char *p, u_int *v)
510 {
511 char *x;
512 long l;
513
514 l = strtol(p, &x, 0);
515 if (*x != '\0' || l < 0 || l > INT_MAX)
516 return (EINVAL);
517 *v = (unsigned int)l;
518 return (0);
519 }
520
/*
 * Probe all registered schemes against the consumer and attach the
 * best-matching table to gp->softc. Probe results are <= 0 where 0 is
 * a perfect match and more negative is a weaker match; a positive
 * result is an error. An existing table's scheme is probed first and
 * kept outright on a perfect match. Returns 0 if some scheme matched,
 * ENXIO otherwise.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		/* Each scheme gets a fresh kobj-backed candidate table. */
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* New best match: replace any previous candidate. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
562
563 /*
564 * Control request functions.
565 */
566
/*
 * Handle the 'add' verb: create a new partition entry. The requested
 * range must lie within the table bounds and must not overlap any
 * live entry; an explicit index must be free, otherwise the first
 * free index is used. A previously deleted (uncommitted) entry with
 * the same index is resurrected instead of allocating a new one. On
 * success a provider is created for the entry.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Range must fall within the usable table bounds. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Single pass over the (index-sorted) entry list: remember a
	 * deleted entry with our index (delent), track the insertion
	 * point (last), advance the candidate index past taken slots,
	 * and check the new range against every live entry.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index that moved is already in use. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Reuse the deleted entry if we found one, else allocate. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Link the new entry at its sorted position. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
681
682 static int
683 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
684 {
685 struct g_geom *gp;
686 struct g_part_table *table;
687 struct sbuf *sb;
688 int error, sz;
689
690 gp = gpp->gpp_geom;
691 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
692 g_topology_assert();
693
694 table = gp->softc;
695 sz = table->gpt_scheme->gps_bootcodesz;
696 if (sz == 0) {
697 error = ENODEV;
698 goto fail;
699 }
700 if (gpp->gpp_codesize > sz) {
701 error = EFBIG;
702 goto fail;
703 }
704
705 error = G_PART_BOOTCODE(table, gpp);
706 if (error)
707 goto fail;
708
709 /* Provide feedback if so requested. */
710 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
711 sb = sbuf_new_auto();
712 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
713 sbuf_finish(sb);
714 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
715 sbuf_delete(sb);
716 }
717 return (0);
718
719 fail:
720 gctl_error(req, "%d", error);
721 return (error);
722 }
723
/*
 * Handle the 'commit' verb: write the in-memory table to disk. First
 * any sectors scheduled for scrubbing at the head/tail of the media
 * (bitmaps gpt_smhead/gpt_smtail) are zeroed, then the scheme writes
 * its metadata. Deleted entries are reclaimed and created/modified
 * flags are reset. The geom must have been opened by a prior
 * create/undo cycle (gpt_opened); committing a null scheme destroys
 * the geom instead. Media I/O is done with the topology lock dropped.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Scrub (zero) scheduled sectors with one zeroed buffer. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			/* Bit i = sector i from the start of the media. */
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			/* Bit i = sector i+1 counted from the media end. */
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/* Committing a destroyed table: tear the geom down. */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Reclaim deleted entries; clear dirty flags on the rest. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
806
/*
 * Handle the 'create' verb: put a new partition table of the given
 * scheme on a provider. Reuses an existing geom that carries only the
 * null scheme (post-destroy), otherwise creates a new geom and opens
 * the provider r1w1e1. The scheme table is kobj-created, a disk
 * geometry is synthesized, and the scheme's CREATE method initializes
 * the metadata. On failure the geom is withered (new geom) or the
 * null table is restored (reused geom). Sets gpp_geom on success so
 * the caller can auto-commit.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	/* Attach a fresh scheme-specific table object to the geom. */
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* New geom: attach and open the provider read/write. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Reused geom: inherit open state and scrub bitmaps. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* New geom: release the provider and tear it down. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Reused geom: put the null table back. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
933
934 static int
935 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
936 {
937 struct g_geom *gp;
938 struct g_provider *pp;
939 struct g_part_entry *entry;
940 struct g_part_table *table;
941 struct sbuf *sb;
942
943 gp = gpp->gpp_geom;
944 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
945 g_topology_assert();
946
947 table = gp->softc;
948
949 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
950 if (entry->gpe_deleted || entry->gpe_internal)
951 continue;
952 if (entry->gpe_index == gpp->gpp_index)
953 break;
954 }
955 if (entry == NULL) {
956 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
957 return (ENOENT);
958 }
959
960 pp = entry->gpe_pp;
961 if (pp != NULL) {
962 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
963 gctl_error(req, "%d", EBUSY);
964 return (EBUSY);
965 }
966
967 pp->private = NULL;
968 entry->gpe_pp = NULL;
969 }
970
971 if (pp != NULL)
972 g_wither_provider(pp, ENXIO);
973
974 /* Provide feedback if so requested. */
975 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
976 sb = sbuf_new_auto();
977 G_PART_FULLNAME(table, entry, sb, gp->name);
978 sbuf_cat(sb, " deleted\n");
979 sbuf_finish(sb);
980 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
981 sbuf_delete(sb);
982 }
983
984 if (entry->gpe_created) {
985 LIST_REMOVE(entry, gpe_entry);
986 g_free(entry);
987 } else {
988 entry->gpe_modified = 0;
989 entry->gpe_deleted = 1;
990 }
991 return (0);
992 }
993
/*
 * Handle the 'destroy' verb: remove the partition table from the
 * geom. Without 'force', any open partition provider makes this fail
 * with EBUSY; with 'force', all providers are withered first. After
 * the scheme's DESTROY method succeeds, the table is replaced by the
 * null scheme (preserving depth, open state and scrub bitmaps) so the
 * destruction can still be committed or undone.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			/* Forced: only closed providers are tolerated. */
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Swap in a fresh null-scheme table. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	/* Carry over state the commit/undo paths still need. */
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1076
1077 static int
1078 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1079 {
1080 struct g_geom *gp;
1081 struct g_part_entry *entry;
1082 struct g_part_table *table;
1083 struct sbuf *sb;
1084 int error;
1085
1086 gp = gpp->gpp_geom;
1087 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1088 g_topology_assert();
1089
1090 table = gp->softc;
1091
1092 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1093 if (entry->gpe_deleted || entry->gpe_internal)
1094 continue;
1095 if (entry->gpe_index == gpp->gpp_index)
1096 break;
1097 }
1098 if (entry == NULL) {
1099 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1100 return (ENOENT);
1101 }
1102
1103 error = G_PART_MODIFY(table, entry, gpp);
1104 if (error) {
1105 gctl_error(req, "%d", error);
1106 return (error);
1107 }
1108
1109 if (!entry->gpe_created)
1110 entry->gpe_modified = 1;
1111
1112 /* Provide feedback if so requested. */
1113 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1114 sb = sbuf_new_auto();
1115 G_PART_FULLNAME(table, entry, sb, gp->name);
1116 sbuf_cat(sb, " modified\n");
1117 sbuf_finish(sb);
1118 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1119 sbuf_delete(sb);
1120 }
1121 return (0);
1122 }
1123
/*
 * Handle the 'move' verb: not implemented by any scheme; always
 * fails with ENOSYS.
 */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
1130
/*
 * Handle the "recover" verb: if the partition table was flagged as
 * corrupt when it was tasted, ask the scheme to repair its metadata and
 * then re-verify the table's integrity.  A no-op (with feedback) when
 * the table is healthy.
 */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1171
/*
 * Handle the "resize" verb: grow or shrink an existing partition in
 * place (the start sector never moves).  Validates the requested size
 * against the table bounds and against every other live entry before
 * delegating to the scheme, then updates the provider's media size.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size: new last sector must lie within the table. */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	/*
	 * Reject the resize if the new extent would collide with any
	 * other live entry: either the new end lands inside another
	 * partition, or the grown partition would fully swallow one.
	 */
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	/*
	 * Refuse to resize an open partition unless the operator has
	 * turned on the relevant g_debugflags bit (0x10 - presumably
	 * the "foot-shooting" override; confirm against geom(4)).
	 */
	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1254
1255 static int
1256 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1257 unsigned int set)
1258 {
1259 struct g_geom *gp;
1260 struct g_part_entry *entry;
1261 struct g_part_table *table;
1262 struct sbuf *sb;
1263 int error;
1264
1265 gp = gpp->gpp_geom;
1266 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1267 g_topology_assert();
1268
1269 table = gp->softc;
1270
1271 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1272 if (entry->gpe_deleted || entry->gpe_internal)
1273 continue;
1274 if (entry->gpe_index == gpp->gpp_index)
1275 break;
1276 }
1277 if (entry == NULL) {
1278 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1279 return (ENOENT);
1280 }
1281
1282 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1283 if (error) {
1284 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1285 return (error);
1286 }
1287
1288 /* Provide feedback if so requested. */
1289 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1290 sb = sbuf_new_auto();
1291 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1292 (set) ? "" : "un");
1293 G_PART_FULLNAME(table, entry, sb, gp->name);
1294 sbuf_printf(sb, "\n");
1295 sbuf_finish(sb);
1296 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1297 sbuf_delete(sb);
1298 }
1299 return (0);
1300 }
1301
/*
 * Handle the "undo" verb: discard all uncommitted changes to an opened
 * table.  Created entries are withered away, modified flags cleared and
 * deleted entries freed.  If the whole table itself is uncommitted
 * (freshly created or scheme-less), the on-disk metadata is re-probed
 * and re-read from scratch.  Drops and re-takes the topology lock
 * around the I/O-performing probe/read/integrity steps.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	/* There is nothing to undo unless the table has been opened. */
	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	/* Roll back every per-entry change.  Safe iteration: entries may
	 * be removed from the list as we go. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Created-but-uncommitted: destroy its provider
			 * and fall through to deletion below. */
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/* Re-probe if the table was created in-core (never read from
	 * disk) or there is no real scheme attached. */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		/* Any remaining non-internal entry means a provider is
		 * still out there; we cannot rip the table away. */
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* Nothing on disk either: tear the geom down.
			 * Deliberately returns success - the undo itself
			 * worked; there is simply no table any more. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		/* g_part_probe() may have replaced the softc. */
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Re-create providers for all non-internal entries read back. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1397
/*
 * Destroy a partitioning geom: free all entries, release the scheme
 * kobj and wither the geom (providers/consumers go away with it).
 */
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		/*
		 * Re-check gp->softc instead of using 'table':
		 * presumably G_PART_DESTROY() can replace or clear the
		 * softc - confirm against the scheme implementations.
		 */
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}
1418
1419 /*
1420 * Class methods.
1421 */
1422
/*
 * GEOM control-request entry point for the PART class.  Parses the verb
 * and its parameters from the gctl request, obtains write access to the
 * geom when the verb modifies the table, lets the scheme pre-check the
 * parameters, and dispatches to the per-verb g_part_ctl_*() handler.
 * Also implements the automatic-commit 'C' flag.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, len, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Phase 1: map the verb onto a request code and build the sets
	 * of mandatory (mparms) and optional (oparms) parameters for it.
	 * The switch on the first character is just a dispatch shortcut.
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/*
	 * Phase 2: walk the request arguments, map each name onto a
	 * G_PART_PARM_* bit, reject unexpected ones and parse the value
	 * into the corresponding gpp field.
	 */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			/* "class" is consumed by the gctl layer; skip it. */
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			/* "verb" was already handled by the caller. */
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		/* Only parameters the verb declared are accepted. */
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		/* Bootcode is raw binary; everything else is ASCII. */
		if (parm == G_PART_PARM_BOOTCODE)
			p = gctl_get_param(req, ap->name, &len);
		else
			p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(p, &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			gpp.gpp_codeptr = p;
			gpp.gpp_codesize = len;
			error = 0;
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			/* An empty flags string is treated as absent. */
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint(p, &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0; /* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		/* A corrupt table may only be destroyed or recovered. */
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	/* Phase 3: dispatch to the verb handler. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit: the 'C' flag commits on success. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	/* Drop the access we took above if the verb handler failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
1764
/*
 * Class destroy_geom method: unconditionally wither the geom.  Always
 * reports success to the caller.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1776
/*
 * Class taste method: probe a provider for a partition table.  On
 * success returns a new geom with one provider per partition entry;
 * returns NULL when no scheme recognizes the media or the provider is
 * unsuitable (open for writing, no media, nesting limit reached).
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Hold root mounting while probing; the probe does real I/O. */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Publish a provider for every non-internal entry. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1869
1870 /*
1871 * Geom methods.
1872 */
1873
1874 static int
1875 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1876 {
1877 struct g_consumer *cp;
1878
1879 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1880 dw, de));
1881
1882 cp = LIST_FIRST(&pp->geom->consumer);
1883
1884 /* We always gain write-exclusive access. */
1885 return (g_access(cp, dr, dw, dw + de));
1886 }
1887
/*
 * Geom dumpconf method: emit configuration for sysctl kern.geom.conftxt
 * (indent == NULL) or the confxml tree.  Exactly one of cp/pp is
 * non-NULL for consumer/provider sections; both NULL means the geom
 * section.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Flat conftxt format: one line per provider. */
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1954
/*
 * Geom orphan method: the underlying provider went away.  Drop any
 * access an open table still holds and wither the whole geom.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, (__func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}
1972
/*
 * Geom spoiled method: someone opened the underlying provider for
 * writing behind our back; our cached table can no longer be trusted,
 * so tear the geom down.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1982
/*
 * Geom start method: handle a bio aimed at one of our partition
 * providers.  Reads/writes/deletes are clipped to the partition,
 * shifted by the partition offset and passed down; GETATTR answers
 * geometry/nesting attributes locally and translates kernel-dump
 * requests; everything else is either forwarded (FLUSH) or rejected.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry means the provider is being withered; fail the I/O. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request at the partition boundary, then shift
		 * it to the absolute offset on the parent provider. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Forwarded unmodified below. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			/* Translate the dump extent like a read/write. */
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* FLUSH and unhandled GETATTRs: pass the bio down untouched. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
2077
/*
 * Class init method: register the built-in null scheme.  It is placed
 * at the head of the scheme list so it is tried first.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2084
/*
 * Class fini method: unregister the built-in null scheme.
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2091
/*
 * Event handler run from the GEOM event thread when a scheme module is
 * unloaded.  'arg' points at a uintptr_t that carries the scheme in and
 * the resulting error code out (0 on success, EBUSY if any geom using
 * the scheme still has open access counts).
 */
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		/* Sum all access counts on this geom's edges. */
		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		/* Idle geoms can be withered; busy ones veto the unload. */
		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	/* Return the error code through the exchange cell. */
	*xchg = error;
}
2134
/*
 * Module event handler shared by all scheme modules: registers the
 * scheme and retastes on load; on unload, synchronously runs
 * g_part_unload_event() in the GEOM event thread and propagates its
 * result.  Returns 0 on success or an errno.
 */
int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	uintptr_t arg;
	int error;

	switch (type) {
	case MOD_LOAD:
		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);

		error = g_retaste(&g_part_class);
		if (error)
			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
		break;
	case MOD_UNLOAD:
		/* 'arg' carries the scheme in and the error code out. */
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		/* If the event never touched 'arg', something is badly
		 * wrong; otherwise 'arg' now holds the error code. */
		if (!error)
			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
Cache object: 051e264917f244caea11da617d2eef1c
|