/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "xgene_enet_main.h"
22 #include "xgene_enet_hw.h"
23 #include "xgene_enet_sgmac.h"
25 static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
27 iowrite32(val, p->eth_csr_addr + offset);
30 static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
33 iowrite32(val, p->eth_ring_if_addr + offset);
36 static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
39 iowrite32(val, p->eth_diag_csr_addr + offset);
42 static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
43 u32 wr_addr, u32 wr_data)
47 iowrite32(wr_addr, ctl->addr);
48 iowrite32(wr_data, ctl->ctl);
49 iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
51 /* wait for write command to complete */
52 for (i = 0; i < 10; i++) {
53 if (ioread32(ctl->cmd_done)) {
54 iowrite32(0, ctl->cmd);
63 static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
64 u32 wr_addr, u32 wr_data)
66 struct xgene_indirect_ctl ctl = {
67 .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
68 .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
69 .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
70 .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
73 if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
74 netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
77 static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
79 return ioread32(p->eth_csr_addr + offset);
82 static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
84 return ioread32(p->eth_diag_csr_addr + offset);
87 static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
92 iowrite32(rd_addr, ctl->addr);
93 iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
95 /* wait for read command to complete */
96 for (i = 0; i < 10; i++) {
97 if (ioread32(ctl->cmd_done)) {
98 rd_data = ioread32(ctl->ctl);
99 iowrite32(0, ctl->cmd);
106 pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
111 static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
113 struct xgene_indirect_ctl ctl = {
114 .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
115 .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
116 .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
117 .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
120 return xgene_enet_rd_indirect(&ctl, rd_addr);
123 static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
125 struct net_device *ndev = p->ndev;
129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
131 usleep_range(100, 110);
132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
137 netdev_err(ndev, "Failed to release memory from shutdown\n");
141 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
143 u32 val = 0xffffffff;
145 xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
146 xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
149 static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
152 u32 addr, wr_data, done;
155 addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
156 xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
158 wr_data = PHY_CONTROL(data);
159 xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
161 for (i = 0; i < 10; i++) {
162 done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
163 if (!(done & BUSY_MASK))
165 usleep_range(10, 20);
168 netdev_err(p->ndev, "MII_MGMT write failed\n");
171 static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
173 u32 addr, data, done;
176 addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
177 xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
178 xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
180 for (i = 0; i < 10; i++) {
181 done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
182 if (!(done & BUSY_MASK)) {
183 data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
184 xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
188 usleep_range(10, 20);
191 netdev_err(p->ndev, "MII_MGMT read failed\n");
196 static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
198 xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
199 xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
202 static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
205 u8 *dev_addr = p->ndev->dev_addr;
207 addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
208 (dev_addr[1] << 8) | dev_addr[0];
209 xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
211 addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
212 addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
213 xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
216 static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
220 data = xgene_mii_phy_read(p, INT_PHY_ADDR,
221 SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
223 return data & LINK_UP;
226 static void xgene_sgmac_init(struct xgene_enet_pdata *p)
230 xgene_sgmac_reset(p);
232 /* Enable auto-negotiation */
233 xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
234 xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
237 data = xgene_mii_phy_read(p, INT_PHY_ADDR,
238 SGMII_STATUS_ADDR >> 2);
239 if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
241 usleep_range(10, 20);
243 if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
244 netdev_err(p->ndev, "Auto-negotiation failed\n");
246 data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
247 ENET_INTERFACE_MODE2_SET(&data, 2);
248 xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
249 xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
251 data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
252 data |= MPA_IDLE_WITH_QMI_EMPTY;
253 xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
255 xgene_sgmac_set_mac_addr(p);
257 data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
258 data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
259 xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
261 /* Adjust MDC clock frequency */
262 data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
263 MGMT_CLOCK_SEL_SET(&data, 7);
264 xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
266 /* Enable drop if bufpool not available */
267 data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
268 data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
269 xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
271 /* Rtype should be copied from FP */
272 xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
274 /* Bypass traffic gating */
275 xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
276 xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
277 xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
280 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
284 data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
291 xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
294 static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
296 xgene_sgmac_rxtx(p, RX_EN, true);
299 static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
301 xgene_sgmac_rxtx(p, TX_EN, true);
304 static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
306 xgene_sgmac_rxtx(p, RX_EN, false);
309 static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
311 xgene_sgmac_rxtx(p, TX_EN, false);
314 static void xgene_enet_reset(struct xgene_enet_pdata *p)
316 clk_prepare_enable(p->clk);
317 clk_disable_unprepare(p->clk);
318 clk_prepare_enable(p->clk);
320 xgene_enet_ecc_init(p);
321 xgene_enet_config_ring_if_assoc(p);
324 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
325 u32 dst_ring_num, u16 bufpool_id)
329 data = CFG_CLE_BYPASS_EN0;
330 xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);
332 fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
333 data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
334 xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
337 static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
339 clk_disable_unprepare(p->clk);
342 static void xgene_enet_link_state(struct work_struct *work)
344 struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
345 struct xgene_enet_pdata, link_work);
346 struct net_device *ndev = p->ndev;
347 u32 link, poll_interval;
349 link = xgene_enet_link_status(p);
351 if (!netif_carrier_ok(ndev)) {
352 netif_carrier_on(ndev);
354 xgene_sgmac_rx_enable(p);
355 xgene_sgmac_tx_enable(p);
356 netdev_info(ndev, "Link is Up - 1Gbps\n");
358 poll_interval = PHY_POLL_LINK_ON;
360 if (netif_carrier_ok(ndev)) {
361 xgene_sgmac_rx_disable(p);
362 xgene_sgmac_tx_disable(p);
363 netif_carrier_off(ndev);
364 netdev_info(ndev, "Link is Down\n");
366 poll_interval = PHY_POLL_LINK_OFF;
369 schedule_delayed_work(&p->link_work, poll_interval);
372 struct xgene_mac_ops xgene_sgmac_ops = {
373 .init = xgene_sgmac_init,
374 .reset = xgene_sgmac_reset,
375 .rx_enable = xgene_sgmac_rx_enable,
376 .tx_enable = xgene_sgmac_tx_enable,
377 .rx_disable = xgene_sgmac_rx_disable,
378 .tx_disable = xgene_sgmac_tx_disable,
379 .set_mac_addr = xgene_sgmac_set_mac_addr,
380 .link_state = xgene_enet_link_state
383 struct xgene_port_ops xgene_sgport_ops = {
384 .reset = xgene_enet_reset,
385 .cle_bypass = xgene_enet_cle_bypass,
386 .shutdown = xgene_enet_shutdown