/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sbuf.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include <x86/cpufreq/hwpstate_intel_internal.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

extern uint64_t tsc_freq;

static int	intel_hwpstate_probe(device_t dev);
static int	intel_hwpstate_attach(device_t dev);
static int	intel_hwpstate_detach(device_t dev);
static int	intel_hwpstate_suspend(device_t dev);
static int	intel_hwpstate_resume(device_t dev);

static int	intel_hwpstate_get(device_t dev, struct cf_setting *cf);
static int	intel_hwpstate_type(device_t dev, int *type);

static device_method_t intel_hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	intel_hwpstate_identify),
	DEVMETHOD(device_probe,		intel_hwpstate_probe),
	DEVMETHOD(device_attach,	intel_hwpstate_attach),
	DEVMETHOD(device_detach,	intel_hwpstate_detach),
	DEVMETHOD(device_suspend,	intel_hwpstate_suspend),
	DEVMETHOD(device_resume,	intel_hwpstate_resume),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_get,	intel_hwpstate_get),
	DEVMETHOD(cpufreq_drv_type,	intel_hwpstate_type),

	DEVMETHOD_END
};

struct hwp_softc {
	device_t	dev;
	bool		hwp_notifications;
	bool		hwp_activity_window;
	bool		hwp_pref_ctrl;
	bool		hwp_pkg_ctrl;
	bool		hwp_pkg_ctrl_en;
	bool		hwp_perf_bias;
	bool		hwp_perf_bias_cached;

	uint64_t	req;			/* Cached copy of HWP_REQUEST */
	uint64_t	hwp_energy_perf_bias;	/* Cache PERF_BIAS */

	uint8_t		high;
	uint8_t		guaranteed;
	uint8_t		efficient;
	uint8_t		low;
};

static driver_t hwpstate_intel_driver = {
	"hwpstate_intel",
	intel_hwpstate_methods,
	sizeof(struct hwp_softc),
};

DRIVER_MODULE(hwpstate_intel, cpu, hwpstate_intel_driver, NULL, NULL);
MODULE_VERSION(hwpstate_intel, 1);

static bool hwpstate_pkg_ctrl_enable = true;
SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
    &hwpstate_pkg_ctrl_enable, 0,
    "Set 1 (default) to enable package-level control, 0 to disable");
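
/*
 * For example, because this is a read-only tunable (CTLFLAG_RDTUN),
 * package-level control can only be disabled from the loader, e.g. by adding
 * "machdep.hwpstate_pkg_ctrl=0" to /boot/loader.conf before boot.
 */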

static int
intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct pcpu *pc;
	struct sbuf *sb;
	struct hwp_softc *sc;
	uint64_t data, data2;
	int ret;

	sc = (struct hwp_softc *)arg1;
	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_putc(sb, '\n');
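	/*
	 * The HWP MSRs are per logical CPU, so bind the current thread to the
	 * CPU backing this device before issuing rdmsr.
	 */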
	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
	sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
	    ((data & 1) ? "En" : "Dis"));

	if (data == 0) {
		ret = 0;
		goto out;
	}

	rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
	sbuf_printf(sb, "\tHighest Performance: %03ju\n", data & 0xff);
	sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n", (data >> 8) & 0xff);
	sbuf_printf(sb, "\tEfficient Performance: %03ju\n", (data >> 16) & 0xff);
	sbuf_printf(sb, "\tLowest Performance: %03ju\n", (data >> 24) & 0xff);

	rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
	data2 = 0;
	if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL))
		rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);

	sbuf_putc(sb, '\n');

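/*
 * Print the value that governs each field: the per-thread request field when
 * its "valid" bit is set (or when package control is not supported at all),
 * and the package-wide request field otherwise.
 */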
#define pkg_print(x, name, offset) do {					\
	if (!sc->hwp_pkg_ctrl || (data & x) != 0)			\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data >> offset) & 0xff);			\
	else								\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data2 >> offset) & 0xff);		\
} while (0)

	pkg_print(IA32_HWP_REQUEST_EPP_VALID,
	    "Requested Efficiency Performance Preference", 24);
	pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
	    "Requested Desired Performance", 16);
	pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
	    "Requested Maximum Performance", 8);
	pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
	    "Requested Minimum Performance", 0);
#undef pkg_print

	sbuf_putc(sb, '\n');

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	ret = sbuf_finish(sb);
	if (ret == 0)
		ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
	sbuf_delete(sb);

	return (ret);
}

static inline int
percent_to_raw(int x)
{

	MPASS(x <= 100 && x >= 0);
	return (0xff * x / 100);
}

/*
 * Given x * 10 in [0, 1000], round to the integer nearest x.
 *
 * This allows round-tripping nice human readable numbers through this
 * interface.  Otherwise, user-provided percentages such as 25, 50, 75 get
 * rounded down to 24, 49, and 74, which is a bit ugly.
 */
static inline int
round10(int xtimes10)
{
	return ((xtimes10 + 5) / 10);
}

static inline int
raw_to_percent(int x)
{
	MPASS(x <= 0xff && x >= 0);
	return (round10(x * 1000 / 0xff));
}
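
/*
 * Illustrative round trip (example added for clarity, not driver logic):
 * writing 50 stores percent_to_raw(50) = 0xff * 50 / 100 = 127; reading it
 * back yields raw_to_percent(127) = round10(127 * 1000 / 0xff) =
 * round10(498) = 50, whereas plain truncation (127 * 100 / 0xff) would
 * report 49.
 */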

/* Range of MSR_IA32_ENERGY_PERF_BIAS is more limited: 0-0xf. */
static inline int
percent_to_raw_perf_bias(int x)
{
	/*
	 * Round up so that raw values present as nice round human numbers and
	 * also round-trip to the same raw value.
	 */
	MPASS(x <= 100 && x >= 0);
	return (((0xf * x) + 50) / 100);
}

static inline int
raw_to_percent_perf_bias(int x)
{
	/* Rounding to nice human numbers despite a step interval of 6.67%. */
	MPASS(x <= 0xf && x >= 0);
	return (((x * 20) / 0xf) * 5);
}
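
/*
 * Illustrative round trip for the 0-0xf range (example only): writing 50
 * stores percent_to_raw_perf_bias(50) = (0xf * 50 + 50) / 100 = 8, and
 * reading back gives raw_to_percent_perf_bias(8) = ((8 * 20) / 0xf) * 5 = 50;
 * every raw step reads back as a multiple of 5 percent.
 */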

static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
	struct hwp_softc *sc;
	device_t dev;
	struct pcpu *pc;
	uint64_t epb;
	uint32_t val;
	int ret;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (!sc->hwp_pref_ctrl && !sc->hwp_perf_bias)
		return (ENODEV);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	if (sc->hwp_pref_ctrl) {
		val = (sc->req & IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
		val = raw_to_percent(val);
	} else {
		/*
		 * If cpuid indicates EPP is not supported, the HWP controller
		 * uses MSR_IA32_ENERGY_PERF_BIAS instead (Intel SDM §14.4.4).
		 * This register is per-core (but not HT).
		 */
		if (!sc->hwp_perf_bias_cached) {
			ret = rdmsr_safe(MSR_IA32_ENERGY_PERF_BIAS, &epb);
			if (ret)
				goto out;
			sc->hwp_energy_perf_bias = epb;
			sc->hwp_perf_bias_cached = true;
		}
		val = sc->hwp_energy_perf_bias &
		    IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK;
		val = raw_to_percent_perf_bias(val);
	}

	MPASS(val >= 0 && val <= 100);

	ret = sysctl_handle_int(oidp, &val, 0, req);
	if (ret || req->newptr == NULL)
		goto out;

	if (val > 100) {
		ret = EINVAL;
		goto out;
	}

	if (sc->hwp_pref_ctrl) {
		val = percent_to_raw(val);

		sc->req =
		    ((sc->req & ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE)
		    | (val << 24u));

		if (sc->hwp_pkg_ctrl_en)
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		else
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	} else {
		val = percent_to_raw_perf_bias(val);
		MPASS((val & ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) == 0);

		sc->hwp_energy_perf_bias =
		    ((sc->hwp_energy_perf_bias &
		    ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) | val);
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

void
intel_hwpstate_identify(driver_t *driver, device_t parent)
{
	if (device_find_child(parent, "hwpstate_intel", -1) != NULL)
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return;

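	/*
	 * resource_disabled() honors the standard device hint, so for example
	 * setting "hint.hwpstate_intel.0.disabled=1" in device.hints or
	 * loader.conf keeps the driver from attaching.
	 */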
	if (resource_disabled("hwpstate_intel", 0))
		return;

	/*
	 * Intel SDM 14.4.1 (HWP Programming Interfaces):
	 * Availability of HWP baseline resource and capability,
	 * CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
	 * architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
	 * IA32_HWP_REQUEST, IA32_HWP_STATUS.
	 */
	if ((cpu_power_eax & CPUTPM1_HWP) == 0)
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate_intel: add child failed\n");
}

static int
intel_hwpstate_probe(device_t dev)
{

	device_set_desc(dev, "Intel Speed Shift");
	return (BUS_PROBE_NOWILDCARD);
}

static int
set_autonomous_hwp(struct hwp_softc *sc)
{
	struct pcpu *pc;
	device_t dev;
	uint64_t caps;
	int ret;

	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	/* XXX: Many MSRs aren't readable until the feature is enabled. */
	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		/*
		 * This is actually a package-level MSR, and only the first
		 * write is not ignored.  So it is harmless to enable it across
		 * all devices, and this allows us not to care especially in
		 * which order cores (and packages) are probed.  This error
		 * condition should not happen given we gate on the HWP CPUID
		 * feature flag, if the Intel SDM is correct.
		 */
		device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP request MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP capabilities MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/*
	 * High and low are static; "guaranteed" is dynamic; and efficient is
	 * also dynamic.
	 */
	sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
	sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
	sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
	sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);

	/* hardware autonomous selection determines the performance target */
	sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;

	/* enable HW dynamic selection of window size */
	sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;

	/* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
	sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
	sc->req |= sc->low;

	/* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
	sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
	sc->req |= sc->high << 8;

	/* If supported, request package-level control for this CPU. */
	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set up%s autonomous HWP for cpu%d\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}

	/* If supported, write the PKG-wide control MSR. */
	if (sc->hwp_pkg_ctrl_en) {
		/*
		 * "The structure of the IA32_HWP_REQUEST_PKG MSR
		 * (package-level) is identical to the IA32_HWP_REQUEST MSR
		 * with the exception of the Package Control field, which does
		 * not exist." (Intel SDM §14.4.4)
		 */
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package\n");
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

static int
intel_hwpstate_attach(device_t dev)
{
	struct hwp_softc *sc;
	int ret;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* eax */
	if (cpu_power_eax & CPUTPM1_HWP_NOTIFICATION)
		sc->hwp_notifications = true;
	if (cpu_power_eax & CPUTPM1_HWP_ACTIVITY_WINDOW)
		sc->hwp_activity_window = true;
	if (cpu_power_eax & CPUTPM1_HWP_PERF_PREF)
		sc->hwp_pref_ctrl = true;
	if (cpu_power_eax & CPUTPM1_HWP_PKG)
		sc->hwp_pkg_ctrl = true;

	/* Allow administrators to disable pkg-level control. */
	sc->hwp_pkg_ctrl_en = (sc->hwp_pkg_ctrl && hwpstate_pkg_ctrl_enable);

	/* ecx */
	if (cpu_power_ecx & CPUID_PERF_BIAS)
		sc->hwp_perf_bias = true;

	ret = set_autonomous_hwp(sc);
	if (ret)
		return (ret);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
	    sc, 0, intel_hwp_dump_sysctl_handler, "A", "");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "epp", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, dev, 0,
	    sysctl_epp_select, "I",
	    "Efficiency/Performance Preference "
	    "(range from 0, most performant, through 100, most efficient)");
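
	/*
	 * Example of the resulting knobs (names follow from the calls above):
	 * "sysctl dev.hwpstate_intel.0.epp=100" biases CPU 0 toward energy
	 * efficiency, while the CTLFLAG_SKIP debug dump can still be read by
	 * naming it explicitly, e.g. "sysctl debug.hwpstate_intel0".
	 */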

	return (cpufreq_register(dev));
}

static int
intel_hwpstate_detach(device_t dev)
{

	return (cpufreq_unregister(dev));
}

static int
intel_hwpstate_get(device_t dev, struct cf_setting *set)
{
	struct pcpu *pc;
	uint64_t rate;
	int ret;

	if (set == NULL)
		return (EINVAL);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->dev = dev;

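	/*
	 * HWP exposes no fixed P-state table, so report an estimate of the
	 * current operating frequency; cpu_est_clockrate() measures it on the
	 * target CPU rather than deriving it from the HWP registers.
	 */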
	ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
	if (ret == 0)
		set->freq = rate / 1000000;

	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->power = CPUFREQ_VAL_UNKNOWN;
	set->lat = CPUFREQ_VAL_UNKNOWN;

	return (0);
}

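/*
 * With HWP the hardware chooses its own operating point, so the driver is
 * informational only: CPUFREQ_FLAG_INFO_ONLY tells the cpufreq framework
 * there are no settable levels, and CPUFREQ_FLAG_UNCACHED tells it not to
 * cache the reported frequency, which changes from sample to sample.
 */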
static int
intel_hwpstate_type(device_t dev, int *type)
{
	if (type == NULL)
		return (EINVAL);
	*type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY |
	    CPUFREQ_FLAG_UNCACHED;

	return (0);
}

static int
intel_hwpstate_suspend(device_t dev)
{
	return (0);
}

/*
 * Redo a subset of set_autonomous_hwp on resume; untested.  Without this,
 * testers observed that on resume MSR_IA32_HWP_REQUEST was bogus.
 */
static int
intel_hwpstate_resume(device_t dev)
{
	struct hwp_softc *sc;
	struct pcpu *pc;
	int ret;

	sc = device_get_softc(dev);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		device_printf(dev,
		    "Failed to enable HWP for cpu%d after suspend (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set%s autonomous HWP for cpu%d after suspend\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}
	if (sc->hwp_pkg_ctrl_en) {
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package after "
			    "suspend\n");
			goto out;
		}
	}
	if (!sc->hwp_pref_ctrl && sc->hwp_perf_bias_cached) {
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
		if (ret) {
			device_printf(dev,
			    "Failed to set energy perf bias for cpu%d after "
			    "suspend\n", pc->pc_cpuid);
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}