/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg_common.h"
#include "adf_transport_internal.h"
#include "icp_qat_hw.h"
#include "adf_c4xxx_hw_data.h"

#define ADF_C4XXX_PARTITION_SHIFT 8
#define ADF_C4XXX_PARTITION(svc, ring) \
	((svc) << ((ring)*ADF_C4XXX_PARTITION_SHIFT))
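
/*
 * Worked example of the macro above (a sketch derived from the 8-bit
 * shift, not from the datasheet): each ring pair owns one 8-bit field
 * of the partition LUT word, so with four ring pairs
 * (ADF_CFG_NUM_SERVICES) the fields fill a 32-bit value.  A map that
 * carries DC on ring pair 1, for instance, contributes
 *
 *	ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, 1)
 *		== ADF_C4XXX_PART_DC << 8
 *
 * and the final mask is simply the OR of one shifted service selector
 * per ring pair.
 */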

static void
adf_get_partitions_mask(struct adf_accel_dev *accel_dev, u32 *partitions_mask)
{
	device_t dev = accel_to_pci_dev(accel_dev);
	u32 enabled_partitions_msk = 0;
	u8 ring_pair = 0;
	enum adf_cfg_service_type serv_type = 0;
	u16 ring_to_svc_map = accel_dev->hw_device->ring_to_svc_map;

	/* Translate the service assigned to each ring pair into its
	 * per-ring-pair partition selector and OR the selectors together.
	 */
	for (ring_pair = 0; ring_pair < ADF_CFG_NUM_SERVICES; ring_pair++) {
		serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair);
		switch (serv_type) {
		case CRYPTO: {
			/* A CRYPTO service spans two consecutive ring
			 * pairs: ASYM on the current one, SYM on the next.
			 */
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM,
						ring_pair++);
			if (ring_pair < ADF_CFG_NUM_SERVICES)
				enabled_partitions_msk |=
				    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_SYM,
							ring_pair);
			else
				device_printf(
				    dev, "Failed to enable SYM partition.\n");
			break;
		}
		case COMP:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, ring_pair);
			break;
		case SYM:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_SYM, ring_pair);
			break;
		case ASYM:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM, ring_pair);
			break;
		default:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_UNUSED,
						ring_pair);
			break;
		}
	}
	*partitions_mask = enabled_partitions_msk;
}
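
/*
 * Illustration (a hypothetical configuration, not a documented default):
 * if ring_to_svc_map resolves to ASYM, SYM, DC, DC for ring pairs 0..3,
 * the loop above produces
 *
 *	partitions_mask = ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM, 0) |
 *			  ADF_C4XXX_PARTITION(ADF_C4XXX_PART_SYM, 1) |
 *			  ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, 2) |
 *			  ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, 3);
 *
 * A CRYPTO entry expands the same way, except that it fills two
 * consecutive ring pairs (ASYM, then SYM) in a single iteration.
 */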

static void
adf_enable_sym_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_sym_thds = ae_info[ae].num_sym_thd;
	u32 i;
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));

	/* Map the first num_sym_thds worker threads of this AE onto the
	 * SYM partition, starting from thread slot 0.
	 */
	for (i = 0; i < num_sym_thds; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}
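
/*
 * Worked example of the WRKTHD2PARTMAP encoding above (assuming
 * ADF_C4XXX_PARTS_PER_GRP is 4; see adf_c4xxx_hw_data.h for the actual
 * value): partition 5 lives in group 5 / 4 == 1 at in-group position
 * 5 % 4 == 1, so the register value is
 *
 *	(1 << 4) | BIT(1) == 0x12
 *
 * i.e. the upper bits select the partition group and the lower bits
 * form a one-hot index within that group.
 */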

static void
adf_enable_asym_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_asym_thds = ae_info[ae].num_asym_thd;
	u32 i;
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));
	/* For the asymmetric cryptography SKU we have one thread less;
	 * ASYM threads are mapped from the top usable slot downwards.
	 */
	u32 num_all_thds = ADF_NUM_THREADS_PER_AE - 2;

	for (i = num_all_thds; i > (num_all_thds - num_asym_thds); i--)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}

static void
adf_enable_dc_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_dc_thds = ae_info[ae].num_dc_thd;
	u32 i;
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));

	for (i = 0; i < num_dc_thds; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}

/* Initialise resource partitioning: set up a default set of four
 * partitions to arbitrate request rings per bundle.
 */
int
adf_init_arb_c4xxx(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	struct adf_accel_unit_info *au_info = accel_dev->au_info;
	u32 i;
	unsigned long ae_mask;
	u32 partitions_mask = 0;

	/* Invoke the common adf_init_arb first. */
	adf_init_arb(accel_dev);

	/* Program the same partition mask into every bank's LUT entry. */
	adf_get_partitions_mask(accel_dev, &partitions_mask);
	for (i = 0; i < hw_data->num_banks; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_PARTITION_LUT_OFFSET,
			      i,
			      partitions_mask);

	ae_mask = hw_data->ae_mask;

	/* Assign default partitions to the accel engine worker threads. */
	for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES)
	{
		if (BIT(i) & au_info->sym_ae_msk)
			adf_enable_sym_threads(accel_dev,
					       i,
					       ADF_C4XXX_PART_SYM);
		if (BIT(i) & au_info->asym_ae_msk)
			adf_enable_asym_threads(accel_dev,
						i,
						ADF_C4XXX_PART_ASYM);
		if (BIT(i) & au_info->dc_ae_msk)
			adf_enable_dc_threads(accel_dev, i, ADF_C4XXX_PART_DC);
	}

	return 0;
}
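
/*
 * A minimal sketch of how these hooks are typically wired up, assuming
 * the usual QAT hw_data initialisation path (the function pointer names
 * follow the common driver convention and may differ in this port):
 *
 *	hw_data->init_arb = adf_init_arb_c4xxx;
 *	hw_data->exit_arb = adf_exit_arb_c4xxx;
 *
 * The common init code then calls init_arb during device bring-up and
 * exit_arb on shutdown.
 */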

/* Disable the resource partitioning feature
 * and restore the default partitioning scheme.
 */
void
adf_exit_arb_c4xxx(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr;
	u32 i;
	unsigned long ae_mask;

	if (!accel_dev->transport)
		return;
	csr = accel_dev->transport->banks[0].csr_addr;

	/* Restore the default partition LUT registers. */
	for (i = 0; i < hw_data->num_banks; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_PARTITION_LUT_OFFSET,
			      i,
			      ADF_C4XXX_DEFAULT_PARTITIONS);

	ae_mask = hw_data->ae_mask;

	/* Reset the worker thread to partition mapping. */
	for (i = 0; i < hw_data->num_engines * ADF_NUM_THREADS_PER_AE; i++) {
		if (!test_bit((u32)(i / ADF_NUM_THREADS_PER_AE), &ae_mask))
			continue;

		WRITE_CSR_WQM(csr, ADF_C4XXX_WRKTHD2PARTMAP, i, 0);
	}
}