/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "e1000_hw.h"
#include "e1000_82575.h"
#include "e1000_mac.h"
#include "e1000_base.h"
#include "e1000_manage.h"
/**
 * e1000_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.
 **/
s32 e1000_acquire_phy_base(struct e1000_hw *hw)
{
        u16 mask = E1000_SWFW_PHY0_SM;

        DEBUGFUNC("e1000_acquire_phy_base");

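        /* Each PCI function owns its own SW/FW PHY semaphore bit; pick the
         * mask for this function so software and firmware accesses to the
         * PHY are serialized.
         */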
        if (hw->bus.func == E1000_FUNC_1)
                mask = E1000_SWFW_PHY1_SM;
        else if (hw->bus.func == E1000_FUNC_2)
                mask = E1000_SWFW_PHY2_SM;
        else if (hw->bus.func == E1000_FUNC_3)
                mask = E1000_SWFW_PHY3_SM;

        return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * e1000_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.
 **/
void e1000_release_phy_base(struct e1000_hw *hw)
{
        u16 mask = E1000_SWFW_PHY0_SM;

        DEBUGFUNC("e1000_release_phy_base");

        if (hw->bus.func == E1000_FUNC_1)
                mask = E1000_SWFW_PHY1_SM;
        else if (hw->bus.func == E1000_FUNC_2)
                mask = E1000_SWFW_PHY2_SM;
        else if (hw->bus.func == E1000_FUNC_3)
                mask = E1000_SWFW_PHY3_SM;

        hw->mac.ops.release_swfw_sync(hw, mask);
}
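
/*
 * Illustrative sketch only (not part of the upstream sources): a MAC-specific
 * PHY read routine would typically bracket the raw MDIC access with the
 * acquire/release wrappers above, along these lines (e1000_read_phy_reg_mdic
 * and PHY_CONTROL come from the shared PHY code; phy_data is a caller-local
 * u16):
 *
 *      ret_val = e1000_acquire_phy_base(hw);
 *      if (ret_val)
 *              return ret_val;
 *      ret_val = e1000_read_phy_reg_mdic(hw, PHY_CONTROL, &phy_data);
 *      e1000_release_phy_base(hw);
 *      return ret_val;
 */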

/**
 * e1000_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This initializes the hardware, readying it for operation.
 **/
s32 e1000_init_hw_base(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;
        s32 ret_val;
        u16 i, rar_count = mac->rar_entry_count;

        DEBUGFUNC("e1000_init_hw_base");

        /* Setup the receive address */
        e1000_init_rx_addrs_generic(hw, rar_count);

        /* Zero out the Multicast HASH table */
        DEBUGOUT("Zeroing the MTA\n");
        for (i = 0; i < mac->mta_reg_count; i++)
                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

        /* Zero out the Unicast HASH table */
        DEBUGOUT("Zeroing the UTA\n");
        for (i = 0; i < mac->uta_reg_count; i++)
                E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

        /* Setup link and flow control */
        ret_val = mac->ops.setup_link(hw);

        /* Clear all of the statistics registers (clear on read). It is
         * important that we do this after we have tried to establish link
         * because the symbol error count will increment wildly if there
         * is no link.
         */
        e1000_clear_hw_cntrs_base_generic(hw);

        return ret_val;
}
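
/*
 * Illustrative note (not from the upstream sources): this base routine is
 * normally reached through a MAC-specific init_hw method after the MAC has
 * been reset, i.e. a typical bring-up sequence looks roughly like:
 *
 *      hw->mac.ops.reset_hw(hw);
 *      hw->mac.ops.init_hw(hw);
 */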

/**
 * e1000_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or when wake on LAN is not enabled, remove the link.
 **/
void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;

        if (!(phy->ops.check_reset_block))
                return;

        /* If the management interface is not enabled, then power down */
        if (phy->ops.check_reset_block(hw))
                e1000_power_down_phy_copper(hw);
}
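
/*
 * Illustrative note (not from the upstream sources): the core driver would
 * typically call this from its detach or suspend path when Wake on LAN is not
 * configured, so the copper PHY is not left powered up unnecessarily.
 */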

/**
 * e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO. This
 * function clears the FIFOs and flushes any packets that came in as Rx was
 * being enabled.
 **/
void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
{
        u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
        int i, ms_wait;

        DEBUGFUNC("e1000_rx_fifo_flush_base");

        /* disable IPv6 options as per hardware errata */
        rfctl = E1000_READ_REG(hw, E1000_RFCTL);
        rfctl |= E1000_RFCTL_IPV6_EX_DIS;
        E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

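        /* Only manageability traffic (MANC.RCV_TCO_EN) can have deposited
         * packets in the receive FIFO before the driver enabled receives;
         * without it there is nothing to flush.
         */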
        if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
                return;

        /* Disable all Rx queues */
        for (i = 0; i < 4; i++) {
                rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
                E1000_WRITE_REG(hw, E1000_RXDCTL(i),
                                rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
        }
        /* Poll all queues to verify they have shut down */
        for (ms_wait = 0; ms_wait < 10; ms_wait++) {
                msec_delay(1);
                rx_enabled = 0;
                for (i = 0; i < 4; i++)
                        rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
                if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
                        break;
        }

        if (ms_wait == 10)
                DEBUGOUT("Queue disable timed out after 10ms\n");
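
        /* Even if a queue did not report disabled within 10 ms, continue with
         * the flush; the original register values are restored at the end
         * either way.
         */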

        /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
         * incoming packets are rejected. Set RCTL.EN and wait 2 ms so that
         * any packet that was arriving while RCTL.EN was being set is
         * flushed.
         */
        E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

        rlpml = E1000_READ_REG(hw, E1000_RLPML);
        E1000_WRITE_REG(hw, E1000_RLPML, 0);

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
        temp_rctl |= E1000_RCTL_LPE;

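        /* With RLPML forced to zero and RCTL.LPE set, every frame is treated
         * as oversized and discarded rather than posted to the host, which
         * drains the packet buffer while the flush runs.
         */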
        E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
        E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
        E1000_WRITE_FLUSH(hw);
        msec_delay(2);

        /* Enable Rx queues that were previously enabled and restore our
         * previous state
         */
        for (i = 0; i < 4; i++)
                E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
        E1000_WRITE_FLUSH(hw);

        E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
        E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

        /* Flush receive errors generated by workaround */
        E1000_READ_REG(hw, E1000_ROC);
        E1000_READ_REG(hw, E1000_RNBC);
        E1000_READ_REG(hw, E1000_MPC);
}