blob: 74e146f470c60e9df5ff01806623a0aaaaa0ec82 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000029static unsigned int num_vfs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030module_param(rx_frag_size, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
/* PCI device IDs claimed by this driver (BladeEngine and OneConnect
 * variants); exported for module autoloading via the macro below. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one human-readable name per bit, used when decoding
 * an Unrecoverable Error status register for logging.
 * (Trailing spaces in some entries are preserved as-is.) */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one human-readable name per bit (32 entries).
 * BUG FIX: the original table was missing the comma after "NETC", so the
 * compiler concatenated "NETC" "Unknown" into a single "NETCUnknown"
 * entry, shifting every following entry and leaving only 31 elements --
 * an out-of-bounds read for bit 31. */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700113
114static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
115{
116 struct be_dma_mem *mem = &q->dma_mem;
117 if (mem->va)
118 pci_free_consistent(adapter->pdev, mem->size,
119 mem->va, mem->dma);
120}
121
122static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
123 u16 len, u16 entry_size)
124{
125 struct be_dma_mem *mem = &q->dma_mem;
126
127 memset(q, 0, sizeof(*q));
128 q->len = len;
129 q->entry_size = entry_size;
130 mem->size = len * entry_size;
131 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
132 if (!mem->va)
133 return -1;
134 memset(mem->va, 0, mem->size);
135 return 0;
136}
137
Sathya Perla8788fdc2009-07-27 22:52:03 +0000138static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000140 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141 u32 reg = ioread32(addr);
142 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000143
Sathya Perlacf588472010-02-14 21:22:01 +0000144 if (adapter->eeh_err)
145 return;
146
Sathya Perla5f0b8492009-07-27 22:52:56 +0000147 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000149 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000151 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154 iowrite32(reg, addr);
155}
156
Sathya Perla8788fdc2009-07-27 22:52:03 +0000157static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158{
159 u32 val = 0;
160 val |= qid & DB_RQ_RING_ID_MASK;
161 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000162
163 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000164 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165}
166
Sathya Perla8788fdc2009-07-27 22:52:03 +0000167static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168{
169 u32 val = 0;
170 val |= qid & DB_TXULP_RING_ID_MASK;
171 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000172
173 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 bool arm, bool clear_int, u16 num_popped)
179{
180 u32 val = 0;
181 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlacf588472010-02-14 21:22:01 +0000182
183 if (adapter->eeh_err)
184 return;
185
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186 if (arm)
187 val |= 1 << DB_EQ_REARM_SHIFT;
188 if (clear_int)
189 val |= 1 << DB_EQ_CLR_SHIFT;
190 val |= 1 << DB_EQ_EVNT_SHIFT;
191 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193}
194
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196{
197 u32 val = 0;
198 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlacf588472010-02-14 21:22:01 +0000199
200 if (adapter->eeh_err)
201 return;
202
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203 if (arm)
204 val |= 1 << DB_CQ_REARM_SHIFT;
205 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000206 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207}
208
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209static int be_mac_addr_set(struct net_device *netdev, void *p)
210{
211 struct be_adapter *adapter = netdev_priv(netdev);
212 struct sockaddr *addr = p;
213 int status = 0;
214
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000215 if (!is_valid_ether_addr(addr->sa_data))
216 return -EADDRNOTAVAIL;
217
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000218 /* MAC addr configuration will be done in hardware for VFs
219 * by their corresponding PFs. Just copy to netdev addr here
220 */
221 if (!be_physfn(adapter))
222 goto netdev_addr;
223
Sathya Perlaa65027e2009-08-17 00:58:04 +0000224 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
225 if (status)
226 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227
Sathya Perlaa65027e2009-08-17 00:58:04 +0000228 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
229 adapter->if_handle, &adapter->pmac_id);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000230netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231 if (!status)
232 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
233
234 return status;
235}
236
/*
 * Refresh adapter->netdev->stats from the driver's own counters and from
 * the hw stats block held in adapter->stats.cmd.
 */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	/* packet/byte totals are maintained by the driver itself */
	dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
	dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
	dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
	dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
307
Sathya Perla8788fdc2009-07-27 22:52:03 +0000308void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700309{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 struct net_device *netdev = adapter->netdev;
311
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700312 /* If link came up or went down */
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000313 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000314 adapter->link_speed = -1;
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000315 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700316 netif_start_queue(netdev);
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000319 } else {
320 netif_stop_queue(netdev);
321 netif_carrier_off(netdev);
322 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700323 }
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000324 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700325 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700326}
327
/* Adapt the RX event-queue interrupt delay (EQD) in BE to the observed
 * RX frags consumed / sec (simple adaptive interrupt coalescing). */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	/* adaptive coalescing disabled: leave the EQD alone */
	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around: just resync the timestamp and sample next time */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	/* frags consumed per second since the previous sample */
	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	/* map frags/sec to a delay, then clamp to [min_eqd, max_eqd];
	 * the 110000 divisor and <<3 scale are presumably empirical
	 * tuning constants -- no derivation visible here */
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;	/* very low rate: no delay at all */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
367
/* ndo_get_stats hook: counters live directly in dev->stats and are
 * refreshed elsewhere by netdev_stats_update(). */
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}
372
/*
 * Convert @bytes accumulated over @ticks jiffies into Mbits/sec.
 * NOTE(review): divides by (ticks / HZ); callers must guarantee
 * ticks >= HZ, otherwise a zero divisor reaches do_div() -- the callers
 * in this file sample at >= 1-2 second intervals, so this holds today,
 * but confirm before reusing elsewhere.
 */
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);	/* -> bytes/sec */
	rate <<= 3; /* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul); /* MB/Sec */

	return rate;
}
383
Sathya Perla4097f662009-03-24 16:40:13 -0700384static void be_tx_rate_update(struct be_adapter *adapter)
385{
386 struct be_drvr_stats *stats = drvr_stats(adapter);
387 ulong now = jiffies;
388
389 /* Wrapped around? */
390 if (time_before(now, stats->be_tx_jiffies)) {
391 stats->be_tx_jiffies = now;
392 return;
393 }
394
395 /* Update tx rate once in two seconds */
396 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700397 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
398 - stats->be_tx_bytes_prev,
399 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700400 stats->be_tx_jiffies = now;
401 stats->be_tx_bytes_prev = stats->be_tx_bytes;
402 }
403}
404
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700405static void be_tx_stats_update(struct be_adapter *adapter,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000406 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700407{
Sathya Perla4097f662009-03-24 16:40:13 -0700408 struct be_drvr_stats *stats = drvr_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700409 stats->be_tx_reqs++;
410 stats->be_tx_wrbs += wrb_cnt;
411 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000412 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700413 if (stopped)
414 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700415}
416
417/* Determine number of WRB entries needed to xmit data in an skb */
418static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
419{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700420 int cnt = (skb->len > skb->data_len);
421
422 cnt += skb_shinfo(skb)->nr_frags;
423
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700424 /* to account for hdr wrb */
425 cnt++;
426 if (cnt & 1) {
427 /* add a dummy to make it an even num */
428 cnt++;
429 *dummy = true;
430 } else
431 *dummy = false;
432 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
433 return cnt;
434}
435
436static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
437{
438 wrb->frag_pa_hi = upper_32_bits(addr);
439 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
440 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
441}
442
/*
 * Fill the per-packet header wrb: crc, LSO/checksum-offload and vlan
 * flags, plus the total wrb count and payload length of this transmit.
 */
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* large-send offload: hw segments using gso_size as MSS */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload: TCP and UDP have separate enable bits */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
474
Sathya Perla7101e112010-03-22 20:41:12 +0000475static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
476 bool unmap_single)
477{
478 dma_addr_t dma;
479
480 be_dws_le_to_cpu(wrb, sizeof(*wrb));
481
482 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000483 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000484 if (unmap_single)
485 pci_unmap_single(pdev, dma, wrb->frag_len,
486 PCI_DMA_TODEVICE);
487 else
488 pci_unmap_page(pdev, dma, wrb->frag_len,
489 PCI_DMA_TODEVICE);
490 }
491}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492
/*
 * DMA-map @skb and fill tx wrbs for it: header wrb first, then one wrb
 * per linear area / page fragment, plus an optional trailing dummy wrb
 * to keep the entry count even.
 * Returns the number of payload bytes mapped, or 0 on a DMA-mapping
 * failure (in which case all partial mappings are unwound and the queue
 * head is restored by the caller via the saved start index).
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the dma_err path */

	/* linear (non-paged) part of the skb, if non-empty */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
				PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one wrb per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		/* zero-length pad so the ring consumes an even wrb count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind every mapping made so far, starting at map_head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;	/* only the first wrb was map_single */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
562
/*
 * ndo_start_xmit hook: build tx wrbs for @skb, ring the TX doorbell and
 * update stats.  On DMA-mapping failure the skb is dropped (the queue
 * head is rolled back and NETDEV_TX_OK is still returned).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: roll back the ring head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
602
603static int be_change_mtu(struct net_device *netdev, int new_mtu)
604{
605 struct be_adapter *adapter = netdev_priv(netdev);
606 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000607 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
608 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 dev_info(&adapter->pdev->dev,
610 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000611 BE_MIN_MTU,
612 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613 return -EINVAL;
614 }
615 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
616 netdev->mtu, new_mtu);
617 netdev->mtu = new_mtu;
618 return 0;
619}
620
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* For a VF, program only its single PF-assigned vlan tag on the
	 * VF's own interface handle.
	 * NOTE(review): control then falls through and also reconfigures
	 * the PF vlan table below -- confirm this is intended. */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vids configured: vlan promiscuous mode instead */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
655
/*
 * Record the new vlan group.  EQ re-arming on the RX and TX event queues
 * is turned off around the assignment and re-enabled afterwards.
 */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	/* disarm both EQs while vlan_grp is being swapped */
	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
668
/* ndo hook: a vlan id was added on this interface. */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	/* VFs don't program vlan filters themselves; their PF does */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* reconfigure hw while within the supported count; the +1 covers
	 * the transition into vlan promiscuous mode (see be_vid_config) */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
681
/* ndo hook: a vlan id was removed from this interface. */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	/* VFs don't program vlan filters themselves; their PF does */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	/* back within the supported count: reprogram the exact vid table
	 * (this also exits vlan promiscuous mode if it was active) */
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
696
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697static void be_set_multicast_list(struct net_device *netdev)
698{
699 struct be_adapter *adapter = netdev_priv(netdev);
700
701 if (netdev->flags & IFF_PROMISC) {
Sathya Perla8788fdc2009-07-27 22:52:03 +0000702 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
Sathya Perla24307ee2009-06-18 00:09:25 +0000703 adapter->promiscuous = true;
704 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000706
707 /* BE was previously in promiscous mode; disable it */
708 if (adapter->promiscuous) {
709 adapter->promiscuous = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000710 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000711 }
712
Sathya Perlae7b909a2009-11-22 22:01:10 +0000713 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000714 if (netdev->flags & IFF_ALLMULTI ||
715 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000716 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000717 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000718 goto done;
719 }
720
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000721 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800722 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000723done:
724 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700725}
726
/* ndo hook: program @mac as the MAC address of VF @vf (PF context). */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	/* drop the previously programmed MAC filter, if any */
	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id);

	/* NOTE(review): a pmac_del failure above is silently overwritten
	 * by the pmac_add status below -- confirm this is intended */
	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
755
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000756static int be_get_vf_config(struct net_device *netdev, int vf,
757 struct ifla_vf_info *vi)
758{
759 struct be_adapter *adapter = netdev_priv(netdev);
760
761 if (!adapter->sriov_enabled)
762 return -EPERM;
763
764 if (vf >= num_vfs)
765 return -EINVAL;
766
767 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000768 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000769 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000770 vi->qos = 0;
771 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
772
773 return 0;
774}
775
/* ndo hook: assign vlan @vlan to VF @vf (0 removes the assignment).
 * @qos is accepted but not used by this hw. */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	/* NOTE(review): vlans_added is updated before be_vid_config();
	 * on failure below the counter stays modified -- confirm intended */
	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
803
Ajit Khapardee1d18732010-07-23 01:52:13 +0000804static int be_set_vf_tx_rate(struct net_device *netdev,
805 int vf, int rate)
806{
807 struct be_adapter *adapter = netdev_priv(netdev);
808 int status = 0;
809
810 if (!adapter->sriov_enabled)
811 return -EPERM;
812
813 if ((vf >= num_vfs) || (rate < 0))
814 return -EINVAL;
815
816 if (rate > 10000)
817 rate = 10000;
818
819 adapter->vf_cfg[vf].vf_tx_rate = rate;
820 status = be_cmd_set_qos(adapter, rate / 10, vf);
821
822 if (status)
823 dev_info(&adapter->pdev->dev,
824 "tx rate %d on VF %d failed\n", rate, vf);
825 return status;
826}
827
Sathya Perla4097f662009-03-24 16:40:13 -0700828static void be_rx_rate_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829{
Sathya Perla4097f662009-03-24 16:40:13 -0700830 struct be_drvr_stats *stats = drvr_stats(adapter);
831 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832
Sathya Perla4097f662009-03-24 16:40:13 -0700833 /* Wrapped around */
834 if (time_before(now, stats->be_rx_jiffies)) {
835 stats->be_rx_jiffies = now;
836 return;
837 }
838
839 /* Update the rate once in two seconds */
840 if ((now - stats->be_rx_jiffies) < 2 * HZ)
841 return;
842
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700843 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
844 - stats->be_rx_bytes_prev,
845 now - stats->be_rx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700846 stats->be_rx_jiffies = now;
847 stats->be_rx_bytes_prev = stats->be_rx_bytes;
848}
849
850static void be_rx_stats_update(struct be_adapter *adapter,
851 u32 pktsize, u16 numfrags)
852{
853 struct be_drvr_stats *stats = drvr_stats(adapter);
854
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855 stats->be_rx_compl++;
856 stats->be_rx_frags += numfrags;
857 stats->be_rx_bytes += pktsize;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000858 stats->be_rx_pkts++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700859}
860
Ajit Khaparde728a9972009-04-13 15:41:22 -0700861static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
862{
863 u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
864
865 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
866 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
867 ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
868 if (ip_version) {
869 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
870 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
871 }
872 ipv6_chk = (ip_version && (tcpf || udpf));
873
874 return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
875}
876
/* Fetch the page_info for rx fragment @frag_idx and take it off the
 * posted rx queue accounting.  If this fragment is the last user of
 * its backing big page, the page's DMA mapping is torn down here.
 * The caller takes ownership of page_info->page.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	/* A completion must never reference a slot we did not post */
	BUG_ON(!rx_page_info->page);

	/* Unmap the big page only once, via the fragment flagged as its
	 * last user by be_post_rx_frags() */
	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	/* One fewer buffer is now owned by hardware */
	atomic_dec(&rxq->used);
	return rx_page_info;
}
895
896/* Throwaway the data in the Rx completion */
897static void be_rx_compl_discard(struct be_adapter *adapter,
898 struct be_eth_rx_compl *rxcp)
899{
900 struct be_queue_info *rxq = &adapter->rx_obj.q;
901 struct be_rx_page_info *page_info;
902 u16 rxq_idx, i, num_rcvd;
903
904 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
905 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
906
907 for (i = 0; i < num_rcvd; i++) {
908 page_info = get_rx_page_info(adapter, rxq_idx);
909 put_page(page_info->page);
910 memset(page_info, 0, sizeof(*page_info));
911 index_inc(&rxq_idx, rxq->len);
912 }
913}
914
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area;
 * the remainder of the first fragment and all further fragments are
 * attached as page frags.  Fragments that share a physical big page
 * are coalesced into a single skb frag slot.  Page ownership passes
 * from the page_info table to the skb (page_info->page is NULLed).
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
		u16 num_rcvd)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page
		 * is no longer referenced by the skb, so drop it */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first fragment becomes skb frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	/* Ownership transferred (or dropped); clear the table slot */
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		/* Single-fragment packet: the completion must agree */
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		/* size = bytes still to account after the previous frag */
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference
			 * taken when the buffer was posted */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
}
998
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Builds an skb from the completion's fragments, sets the checksum
 * state, and hands the packet to the stack (with vlan acceleration
 * when the completion carries a vlan tag).
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		/* Must still consume the fragments to keep the rxq sane */
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

	/* Trust the hw checksum only when do_pkt_csum() says it is safe */
	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		/* No vlan group configured: drop tagged packets */
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		/* vlan tag arrives byte-swapped in the completion */
		vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
1051
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the completion's fragment pages directly to the napi GRO
 * skb (coalescing frags that share a physical page) and feeds it to
 * napi_gro_frags()/vlan_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj =  &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* Must still consume the fragments to keep the rxq sane */
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	/* j starts at -1 so the first fragment opens slot 0 */
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same page as frag j: drop the extra reference
			 * taken when the buffer was posted */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		/* vlan tag arrives byte-swapped in the completion */
		vid = swab16(vid);

		/* NOTE(review): this return leaves the frags attached to
		 * the napi skb and skips the stats update — presumably the
		 * next napi_get_frags() reuses/recycles it; verify */
		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
}
1130
/* Return the next valid rx completion at the CQ tail (converted to
 * cpu endianness and consumed), or NULL if none is pending.
 */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure the compl body is not read before the valid bit above */
	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}
1144
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1153
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154static inline struct page *be_alloc_pages(u32 size)
1155{
1156 gfp_t alloc_flags = GFP_ATOMIC;
1157 u32 order = get_order(size);
1158 if (order > 0)
1159 alloc_flags |= __GFP_COMP;
1160 return alloc_pages(alloc_flags, order);
1161}
1162
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each big page is carved into rx_frag_size slices; every slice after
 * the first takes an extra page reference.  The slice that exhausts
 * the page is flagged last_page_user so get_rx_page_info() knows when
 * to unmap the DMA mapping.  At most MAX_RX_POST buffers are posted,
 * stopping early at the first still-occupied page_info slot.
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another slice of the current page: one extra ref
			 * per slice so put_page() balances on completion */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: the last posted slice owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}
}
1224
/* Return the next valid tx completion at the CQ tail (converted to
 * cpu endianness, valid bit cleared, entry consumed), or NULL if
 * none is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure the compl body is not read before the valid bit above */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the whole word so the entry reads invalid on the next pass */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1240
/* Reclaim one completed tx: unmap every wrb of the skb whose last
 * wrb index is @last_index, free the skb, and release the wrbs back
 * to the tx queue accounting.
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb may carry the (mapped) skb header;
		 * unmap it only once */
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
1272
/* Return the next pending event queue entry (converted to cpu
 * endianness, entry consumed), or NULL if the EQ tail is empty.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Ensure the event body is not read before the evt word above */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1285
1286static int event_handle(struct be_adapter *adapter,
1287 struct be_eq_obj *eq_obj)
1288{
1289 struct be_eq_entry *eqe;
1290 u16 num = 0;
1291
1292 while ((eqe = event_get(eq_obj)) != NULL) {
1293 eqe->evt = 0;
1294 num++;
1295 }
1296
1297 /* Deal with any spurious interrupts that come
1298 * without events
1299 */
1300 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1301 if (num)
1302 napi_schedule(&eq_obj->napi);
1303
1304 return num;
1305}
1306
1307/* Just read and notify events without processing them.
1308 * Used at the time of destroying event queues */
1309static void be_eq_clean(struct be_adapter *adapter,
1310 struct be_eq_obj *eq_obj)
1311{
1312 struct be_eq_entry *eqe;
1313 u16 num = 0;
1314
1315 while ((eqe = event_get(eq_obj)) != NULL) {
1316 eqe->evt = 0;
1317 num++;
1318 }
1319
1320 if (num)
1321 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1322}
1323
/* Drain the rx path at teardown: consume and discard all pending rx
 * completions, then free every rx buffer that was posted but never
 * completed.
 */
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* get_rx_page_info() decrements rxq->used, so this loop walks
	 * exactly the still-posted entries */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
1348
/* Drain the tx path at teardown: poll the tx CQ for completions for
 * up to ~200ms, then forcibly free any posted skbs whose completions
 * never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Recompute the skb's last wrb index from its wrb count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
1391
/* Tear down the MCC queue and its completion queue: destroy each in
 * hardware (if created) and free the backing memory.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1406
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC CQ (on the TX event queue) and the
 * MCC queue itself, unwinding via the goto ladder on failure.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1442
/* Tear down the TX queue, its CQ and its EQ (in that order), draining
 * any residual EQ events before destroying the event queue.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1465
/* Create the TX event queue, TX completion queue and TX queue, in
 * dependency order, unwinding via the goto ladder on failure.
 * Also records the TX EQ id as the adapter's base_eq_id.
 * Returns 0 on success, -1 on any failure.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	/* TX EQ runs with a fixed delay and no adaptive coalescing */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	adapter->base_eq_id = adapter->tx_eq.q.id;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1516
/* Tear down the RX queue, its CQ and its EQ (in that order).  After
 * destroying the RX queue in hardware, waits briefly for in-flight
 * DMA to finish, then reclaims all posted buffers and residual EQ
 * events.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);

		/* After the rxq is invalidated, wait for a grace time
		 * of 1ms for all dma to end and the flush compl to arrive
		 */
		mdelay(1);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1546
/* Allocate host memory for, and create on the controller, the RX event
 * queue, RX completion queue and eth RX queue (in that order, since each
 * depends on the previous one).  On failure, everything already created
 * is unwound via the goto chain.  Returns 0 or the failing step's status.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	/* contiguous chunk size from which rx_frag_size frags are carved */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;	/* adaptive intr coalescing on rx */

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001607
/* There are 8 evt ids per func. Returns the evt id's bit number
 * (i.e. the eq id's offset from this function's base eq id).
 */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - adapter->base_eq_id;
}
1613
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614static irqreturn_t be_intx(int irq, void *dev)
1615{
1616 struct be_adapter *adapter = dev;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001617 int isr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618
Sathya Perla8788fdc2009-07-27 22:52:03 +00001619 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
Sathya Perla55bdeed2010-02-02 07:48:40 -08001620 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
Sathya Perlac001c212009-07-01 01:06:07 +00001621 if (!isr)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001622 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001623
Sathya Perla8788fdc2009-07-27 22:52:03 +00001624 event_handle(adapter, &adapter->tx_eq);
1625 event_handle(adapter, &adapter->rx_eq);
Sathya Perlac001c212009-07-01 01:06:07 +00001626
Sathya Perla8788fdc2009-07-27 22:52:03 +00001627 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001628}
1629
1630static irqreturn_t be_msix_rx(int irq, void *dev)
1631{
1632 struct be_adapter *adapter = dev;
1633
Sathya Perla8788fdc2009-07-27 22:52:03 +00001634 event_handle(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001635
1636 return IRQ_HANDLED;
1637}
1638
Sathya Perla5fb379e2009-06-18 00:02:59 +00001639static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640{
1641 struct be_adapter *adapter = dev;
1642
Sathya Perla8788fdc2009-07-27 22:52:03 +00001643 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644
1645 return IRQ_HANDLED;
1646}
1647
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001648static inline bool do_gro(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001649 struct be_eth_rx_compl *rxcp)
1650{
1651 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1652 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1653
1654 if (err)
1655 drvr_stats(adapter)->be_rxcp_err++;
1656
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001657 return (tcp_frame && !err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658}
1659
/* NAPI poll handler for the rx path.  Processes up to @budget rx
 * completions, refills the rx queue when it runs low, and — per the
 * NAPI contract — re-arms the completion queue (interrupts back on)
 * only when all available work was consumed.  Returns the number of
 * completions processed.
 */
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		/* clean TCP frames are aggregated via GRO; all others take
		 * the regular completion-processing path
		 */
		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);	/* make the CQ entry reusable */
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1697
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	/* Reap all tx completions; each frees the wrbs/skbs up to end_idx */
	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	/* Drain mcc (mailbox/async) completions sharing this EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	/* Re-arm each CQ only if it actually had completions */
	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += tx_compl;
	}

	/* budget deliberately not honoured (see comment above) */
	return 1;
}
1745
Ajit Khaparde7c185272010-07-29 06:16:33 +00001746static inline bool be_detect_ue(struct be_adapter *adapter)
1747{
1748 u32 online0 = 0, online1 = 0;
1749
1750 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754 if (!online0 || !online1) {
1755 adapter->ue_detected = true;
1756 dev_err(&adapter->pdev->dev,
1757 "UE Detected!! online0=%d online1=%d\n",
1758 online0, online1);
1759 return true;
1760 }
1761
1762 return false;
1763}
1764
/* Log which Unrecoverable-Error status bits are set.  Reads the UE
 * status and mask registers from PCI config space, drops masked bits,
 * and prints a description for every remaining set bit.  The bit-name
 * tables ue_status_low_desc/ue_status_hi_desc are defined elsewhere in
 * this file — assumed to have one entry per bit (32 each); TODO confirm.
 */
void be_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	/* masked bits are not real errors; clear them before reporting */
	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
1798
/* Periodic (1 second) housekeeping worker: refreshes hw stats, updates
 * the adaptive EQ delay, accounts tx/rx rates, recovers from rx buffer
 * starvation and checks for unrecoverable errors.  Re-arms itself at
 * the end; cancelled from be_close().
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	/* don't issue a new stats cmd while the previous is still pending */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	/* replenish rx buffers if an earlier posting attempt starved */
	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}
	/* keep probing for a UE only until the first one is seen */
	if (!adapter->ue_detected) {
		if (be_detect_ue(adapter))
			be_dump_ue(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1824
Sathya Perla8d56ff12009-11-22 22:02:26 +00001825static void be_msix_disable(struct be_adapter *adapter)
1826{
1827 if (adapter->msix_enabled) {
1828 pci_disable_msix(adapter->pdev);
1829 adapter->msix_enabled = false;
1830 }
1831}
1832
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833static void be_msix_enable(struct be_adapter *adapter)
1834{
1835 int i, status;
1836
1837 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1838 adapter->msix_entries[i].entry = i;
1839
1840 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1841 BE_NUM_MSIX_VECTORS);
1842 if (status == 0)
1843 adapter->msix_enabled = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844}
1845
/* Record PF/VF identity and, for a PF with the num_vfs module parameter
 * set, enable SR-IOV.  The pci_enable_sriov() call is compiled out when
 * CONFIG_PCI_IOV is not configured.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);	/* sets PF vs VF identity */
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
1858
/* Disable SR-IOV if this driver enabled it; no-op when CONFIG_PCI_IOV
 * is not configured.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}
1868
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1870{
Sathya Perlab628bde2009-08-17 00:58:26 +00001871 return adapter->msix_entries[
1872 be_evt_bit_get(adapter, eq_id)].vector;
1873}
1874
/* Build the irq description "<ifname>-<desc>" and request the MSI-x
 * vector mapped to this event queue.  Returns request_irq()'s status.
 * NOTE(review): sprintf assumes eq_obj->desc is large enough for the
 * combined string — verify against the desc field size in be.h.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}
1886
1887static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1888{
1889 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1890 free_irq(vec, adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891}
1892
1893static int be_msix_register(struct be_adapter *adapter)
1894{
Sathya Perlab628bde2009-08-17 00:58:26 +00001895 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001896
Sathya Perlab628bde2009-08-17 00:58:26 +00001897 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898 if (status)
1899 goto err;
1900
Sathya Perlab628bde2009-08-17 00:58:26 +00001901 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1902 if (status)
1903 goto free_tx_irq;
1904
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001906
1907free_tx_irq:
1908 be_free_irq(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909err:
1910 dev_warn(&adapter->pdev->dev,
1911 "MSIX Request IRQ failed - err %d\n", status);
1912 pci_disable_msix(adapter->pdev);
1913 adapter->msix_enabled = false;
1914 return status;
1915}
1916
/* Register interrupts: prefer MSI-x when enabled; a PF may fall back to
 * shared INTx if MSI-x registration fails, a VF may not.  Sets
 * adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
1944
1945static void be_irq_unregister(struct be_adapter *adapter)
1946{
1947 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948
1949 if (!adapter->isr_registered)
1950 return;
1951
1952 /* INTx */
1953 if (!adapter->msix_enabled) {
1954 free_irq(netdev->irq, adapter);
1955 goto done;
1956 }
1957
1958 /* MSIx */
Sathya Perlab628bde2009-08-17 00:58:26 +00001959 be_free_irq(adapter, &adapter->tx_eq);
1960 be_free_irq(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001961done:
1962 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963}
1964
/* netdev ->ndo_stop: quiesce the device.  Stops the periodic worker and
 * async mcc processing, takes the carrier/queue down, masks interrupts,
 * waits for in-flight irq handlers, frees the irqs, disables NAPI and
 * finally drains outstanding tx completions so every tx skb is freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);	/* mask interrupts at the function */

	/* ensure no handler is still running before unregistering */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2002
/* netdev ->ndo_open: bring the interface up.  Posts the initial rx
 * buffers, enables NAPI, registers and unmasks interrupts, arms the
 * event/completion queues, starts async mcc and the worker, then
 * queries link state and (PF only) programs vlan and flow control.
 * Any failure after that point closes the device and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* vlan and flow-control config are privileged (PF-only) cmds */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2057
/* Configure Wake-on-LAN (magic packet) in firmware and PCI PM state.
 * When enabling, the netdev's MAC is armed as the magic pattern and PM
 * wake is turned on for D3hot/D3cold; when disabling, a zeroed MAC
 * clears the pattern and PM wake is turned off.  Returns 0 on success,
 * -1 on DMA-buffer allocation failure, or a cmd/config-write status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];	/* all-zero MAC used to disarm WoL */

	memset(mac, 0, ETH_ALEN);

	/* DMA-able buffer for the fw magic-wol command */
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2095
Sathya Perla5fb379e2009-06-18 00:02:59 +00002096static int be_setup(struct be_adapter *adapter)
2097{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002098 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002099 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002101 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002102
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002103 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2104
2105 if (be_physfn(adapter)) {
2106 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2107 BE_IF_FLAGS_PROMISCUOUS |
2108 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2109 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2110 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002111
2112 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2113 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002114 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002115 if (status != 0)
2116 goto do_none;
2117
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002118 if (be_physfn(adapter)) {
2119 while (vf < num_vfs) {
2120 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2121 | BE_IF_FLAGS_BROADCAST;
2122 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002123 mac, true,
2124 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002125 NULL, vf+1);
2126 if (status) {
2127 dev_err(&adapter->pdev->dev,
2128 "Interface Create failed for VF %d\n", vf);
2129 goto if_destroy;
2130 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002131 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002132 vf++;
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002133 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002134 } else if (!be_physfn(adapter)) {
2135 status = be_cmd_mac_addr_query(adapter, mac,
2136 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2137 if (!status) {
2138 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2139 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2140 }
2141 }
2142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002143 status = be_tx_queues_create(adapter);
2144 if (status != 0)
2145 goto if_destroy;
2146
2147 status = be_rx_queues_create(adapter);
2148 if (status != 0)
2149 goto tx_qs_destroy;
2150
Sathya Perla5fb379e2009-06-18 00:02:59 +00002151 status = be_mcc_queues_create(adapter);
2152 if (status != 0)
2153 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002154
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002155 adapter->link_speed = -1;
2156
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002157 return 0;
2158
Sathya Perla5fb379e2009-06-18 00:02:59 +00002159rx_qs_destroy:
2160 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161tx_qs_destroy:
2162 be_tx_queues_destroy(adapter);
2163if_destroy:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002164 for (vf = 0; vf < num_vfs; vf++)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002165 if (adapter->vf_cfg[vf].vf_if_handle)
2166 be_cmd_if_destroy(adapter,
2167 adapter->vf_cfg[vf].vf_if_handle);
Sathya Perla8788fdc2009-07-27 22:52:03 +00002168 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169do_none:
2170 return status;
2171}
2172
/* Undo be_setup(): destroy the mcc, rx and tx queues (reverse creation
 * order), destroy the interface, and tell fw this function is done
 * issuing commands.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2185
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Flash-directory cookie split into two 16-byte words.  The second
 * string fills its 16 bytes exactly and is therefore deliberately not
 * NUL-terminated (legal for char-array initializers).
 */
char flash_cookie[2][16] = {"*** SE FLAS",
			"H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002190
/* Decide whether the redboot section must be (re)flashed by comparing
 * the CRC currently in flash with the CRC stored in the last 4 bytes of
 * the redboot image inside the fw file.  Returns true only when they
 * differ; on a CRC-read failure returns false so redboot is skipped.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* the image's own CRC occupies its final 4 bytes */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2217
/* Flash every firmware component contained in the UFI file.  Picks the
 * gen2 or gen3 component table based on the adapter generation, skips
 * components that do not apply (NCSI on pre-3.102.148.0 fw, redboot
 * when its CRC already matches flash), bounds-checks each image against
 * the file, and writes it in 32KB chunks: intermediate chunks use the
 * SAVE op, the final chunk uses FLASH to commit.  Returns 0 on success
 * or -1 on a bounds or cmd failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	/* {flash offset, fw op-type, max size} per component, gen3 chips */
	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	/* same table for gen2 chips (no NCSI component) */
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw image is only valid on new-enough firmware */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* skip redboot unless its CRC shows an update is needed */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			 pflashcomp[i].offset, pflashcomp[i].size,
			 filehdr_size)))
			continue;
		/* locate this component's image within the fw file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* last chunk commits with FLASH; earlier ones SAVE */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();	/* long operation; be scheduler-friendly */
		}
	}
	return 0;
}
2319
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002320static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2321{
2322 if (fhdr == NULL)
2323 return 0;
2324 if (fhdr->build[0] == '3')
2325 return BE_GEN3;
2326 else if (fhdr->build[0] == '2')
2327 return BE_GEN2;
2328 else
2329 return 0;
2330}
2331
/*
 * Flash a firmware image onto the adapter.  @func is the UFI file name
 * handed in through the ethtool flash interface; the file itself is
 * fetched with request_firmware().
 *
 * The UFI generation (read from the file header) must match the adapter
 * generation, otherwise flashing is refused.  Returns 0 on success or a
 * negative/-1 status.
 */
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	/* NOTE(review): unbounded copy; assumes the caller (ethtool core)
	 * NUL-terminates @func within ETHTOOL_FLASH_MAX_FILENAME — confirm. */
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	/* Cast to the g2 header only to probe the generation — presumably the
	 * g2 header layout is a prefix of the g3 one; verify against the UFI
	 * format spec. */
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* DMA buffer big enough for one flashrom cmd header + a 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		/* Walk the per-image headers; only the image with id 1 is
		 * flashed here. */
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			/* NOTE(review): if no image has imageid == 1, status
			 * keeps its earlier value (0) and success is reported
			 * without flashing anything — verify intended. */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
2397
/* Entry points the network stack uses to drive this device, including the
 * SR-IOV per-VF configuration hooks.  Not const — presumably because
 * BE_SET_NETDEV_OPS may adjust entries at init time; TODO confirm. */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_get_stats = be_get_stats,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2415
2416static void be_netdev_init(struct net_device *netdev)
2417{
2418 struct be_adapter *adapter = netdev_priv(netdev);
2419
2420 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
Ajit Khaparde583e3f32009-10-05 02:22:19 +00002421 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
Ajit Khaparde49e4b842010-06-14 04:56:07 +00002422 NETIF_F_GRO | NETIF_F_TSO6;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002423
Ajit Khaparde51c59872009-11-29 17:54:54 +00002424 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2425
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002426 netdev->flags |= IFF_MULTICAST;
2427
Ajit Khaparde728a9972009-04-13 15:41:22 -07002428 adapter->rx_csum = true;
2429
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002430 /* Default settings for Rx and Tx flow control */
2431 adapter->rx_fc = true;
2432 adapter->tx_fc = true;
2433
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002434 netif_set_gso_max_size(netdev, 65535);
2435
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002436 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2437
2438 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2439
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002440 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2441 BE_NAPI_WEIGHT);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002442 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002443 BE_NAPI_WEIGHT);
2444
2445 netif_carrier_off(netdev);
2446 netif_stop_queue(netdev);
2447}
2448
2449static void be_unmap_pci_bars(struct be_adapter *adapter)
2450{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002451 if (adapter->csr)
2452 iounmap(adapter->csr);
2453 if (adapter->db)
2454 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002455 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002456 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002457}
2458
2459static int be_map_pci_bars(struct be_adapter *adapter)
2460{
2461 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002462 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002463
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002464 if (be_physfn(adapter)) {
2465 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2466 pci_resource_len(adapter->pdev, 2));
2467 if (addr == NULL)
2468 return -ENOMEM;
2469 adapter->csr = addr;
2470 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002471
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002472 if (adapter->generation == BE_GEN2) {
2473 pcicfg_reg = 1;
2474 db_reg = 4;
2475 } else {
2476 pcicfg_reg = 0;
2477 if (be_physfn(adapter))
2478 db_reg = 4;
2479 else
2480 db_reg = 0;
2481 }
2482 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2483 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002484 if (addr == NULL)
2485 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002486 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002487
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002488 if (be_physfn(adapter)) {
2489 addr = ioremap_nocache(
2490 pci_resource_start(adapter->pdev, pcicfg_reg),
2491 pci_resource_len(adapter->pdev, pcicfg_reg));
2492 if (addr == NULL)
2493 goto pci_map_err;
2494 adapter->pcicfg = addr;
2495 } else
2496 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497
2498 return 0;
2499pci_map_err:
2500 be_unmap_pci_bars(adapter);
2501 return -ENOMEM;
2502}
2503
2504
2505static void be_ctrl_cleanup(struct be_adapter *adapter)
2506{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002507 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002508
2509 be_unmap_pci_bars(adapter);
2510
2511 if (mem->va)
2512 pci_free_consistent(adapter->pdev, mem->size,
2513 mem->va, mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002514
2515 mem = &adapter->mc_cmd_mem;
2516 if (mem->va)
2517 pci_free_consistent(adapter->pdev, mem->size,
2518 mem->va, mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002519}
2520
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521static int be_ctrl_init(struct be_adapter *adapter)
2522{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002523 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2524 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002525 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002526 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002527
2528 status = be_map_pci_bars(adapter);
2529 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002530 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531
2532 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2533 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2534 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2535 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00002536 status = -ENOMEM;
2537 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00002539
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002540 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2541 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2542 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2543 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00002544
2545 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2546 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2547 &mc_cmd_mem->dma);
2548 if (mc_cmd_mem->va == NULL) {
2549 status = -ENOMEM;
2550 goto free_mbox;
2551 }
2552 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2553
Sathya Perla8788fdc2009-07-27 22:52:03 +00002554 spin_lock_init(&adapter->mbox_lock);
2555 spin_lock_init(&adapter->mcc_lock);
2556 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002557
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002558 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00002559 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002560 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002561
2562free_mbox:
2563 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2564 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2565
2566unmap_pci_bars:
2567 be_unmap_pci_bars(adapter);
2568
2569done:
2570 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002571}
2572
2573static void be_stats_cleanup(struct be_adapter *adapter)
2574{
2575 struct be_stats_obj *stats = &adapter->stats;
2576 struct be_dma_mem *cmd = &stats->cmd;
2577
2578 if (cmd->va)
2579 pci_free_consistent(adapter->pdev, cmd->size,
2580 cmd->va, cmd->dma);
2581}
2582
2583static int be_stats_init(struct be_adapter *adapter)
2584{
2585 struct be_stats_obj *stats = &adapter->stats;
2586 struct be_dma_mem *cmd = &stats->cmd;
2587
2588 cmd->size = sizeof(struct be_cmd_req_get_stats);
2589 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2590 if (cmd->va == NULL)
2591 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08002592 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593 return 0;
2594}
2595
/*
 * PCI remove callback: tear down one function in (roughly) the reverse
 * order of be_probe().  The netdev is unregistered first so no ndo
 * callback can run against a half-destroyed adapter; free_netdev() is
 * last because @adapter lives inside the netdev's private area.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
2621
/*
 * Query fw for this function's configuration: fw version, port number,
 * function mode, and (PF only) the permanent MAC address.
 * Returns 0 or a negative status from the fw commands.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
				&adapter->port_num, &adapter->function_mode);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* Only the PF queries the permanent MAC here; VF MAC assignment is
	 * handled through the ndo_set_vf_mac path instead. */
	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* Bit 0x400 presumably indicates a multi-channel (FLEX10-style) mode
	 * that splits the VLAN table across functions — TODO: replace the
	 * magic number with a named constant and confirm its meaning. */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
2659
/*
 * PCI probe: bring up one BladeEngine function.
 *
 * Ordering matters: the control path (BARs + mailbox, be_ctrl_init) must
 * exist before any fw command; POST / fw_init / reset must precede
 * be_setup(); and the netdev is registered last so no ndo callback can
 * run on a half-initialized adapter.  The goto chain unwinds exactly
 * what has been acquired so far.  Returns 0 or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;


	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	/* The PCI device id determines the ASIC generation, which selects
	 * the BAR layout and the accepted flash-file format. */
	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	/* Prefer 64-bit DMA addressing; fall back to 32-bit */
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* function-level reset is issued by the PF only */
	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
2780
/*
 * PM suspend: arm wake-on-LAN if requested, quiesce the interface, drop
 * fw/hw resources (be_clear) and power the slot down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Capture the current flow-control setting into the adapter so it
	 * survives suspend — presumably re-applied by be_setup() on resume. */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
2803
/*
 * PM resume: re-enable the device, re-sync with fw and rebuild the
 * resources torn down in be_suspend(), then reattach the netdev.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here; a setup
	 * failure falls through to be_open() — verify this is intended. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* WoL was armed on suspend; disarm it now that we are back up */
	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
2836
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown hook: reset the function so no DMA continues across
 * reboot/kexec, arm WoL if configured, and disable the device. */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
2854
/*
 * EEH: a PCI channel error was detected.  Quiesce the interface and tell
 * the error-recovery core whether a slot reset may recover the device
 * (NEED_RESET) or the failure is permanent (DISCONNECT).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag checked elsewhere to suppress further hw access while the
	 * channel is down */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
2881
/*
 * EEH slot reset: re-enable the device, restore PCI state and wait for
 * fw readiness (POST).  RECOVERED lets the core proceed to
 * be_eeh_resume(); DISCONNECT gives up on the device.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
2905
/*
 * EEH resume: after a successful slot reset, rebuild the fw/hw state and
 * reattach the netdev.  On any failure the device is simply left
 * detached with an error logged (no further recovery attempted).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
2935
/* PCI error (EEH) recovery callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
2941
/* PCI driver registration glue: probe/remove, legacy PM hooks, shutdown
 * and EEH error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
2952
2953static int __init be_init_module(void)
2954{
Joe Perches8e95a202009-12-03 07:58:21 +00002955 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2956 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002957 printk(KERN_WARNING DRV_NAME
2958 " : Module param rx_frag_size must be 2048/4096/8192."
2959 " Using 2048\n");
2960 rx_frag_size = 2048;
2961 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002962
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002963 if (num_vfs > 32) {
2964 printk(KERN_WARNING DRV_NAME
2965 " : Module param num_vfs must not be greater than 32."
2966 "Using 32\n");
2967 num_vfs = 32;
2968 }
2969
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002970 return pci_register_driver(&be_driver);
2971}
2972module_init(be_init_module);
2973
/* Module exit: unregister the driver; be_remove() runs for each bound
 * device as part of unregistration. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);